diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vaaddu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_tumu(mask, merge, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_tamu(mask, merge, op1, op2, vl); +} diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c @@ -883,3 +883,75 @@ size_t vl) { return vadc(op1, op2, carryin, vl); } + +// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { + return vadc_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { + return vadc_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_ta(op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { + return vadc_ta(op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_ta(op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint32mf2_t test_vadc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { + return vadc_ta(op1, op2, carryin, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vand(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vand_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t 
vl) { + return vand_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vasubu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vasub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, 
size_t vl) { + return vasubu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_tamu(mask, merge, op1, 
op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vdivu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tuma( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_tumu(mask, merge, op1, op2, vl); +} + 
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c @@ -11,7 +11,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -38,7 +38,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -47,7 +47,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -56,7 +56,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -65,7 +65,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -74,7 +74,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -83,7 +83,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -92,7 +92,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -101,7 +101,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -119,7 +119,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -128,7 +128,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -137,7 +137,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -146,7 +146,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -155,7 +155,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -164,7 +164,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) 
{ +vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } @@ -281,7 +281,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -299,7 +299,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -308,7 +308,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -317,7 +317,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -326,7 +326,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -335,7 +335,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -344,7 +344,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -353,7 +353,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -362,7 +362,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ 
-389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t 
op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, 
vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } @@ -542,6 +542,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c @@ -175,3 +175,57 @@ vfloat64m8_t op1, size_t vl) { return vfclass(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfclass_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfclass_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c @@ -1032,3 +1032,327 @@ vuint64m8_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); } + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_ta(vint32mf2_t src, size_t vl) { + return vfcvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_ta(vuint32mf2_t src, size_t vl) { + return vfcvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tama(vbool64_t mask, 
vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfcvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfcvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_f_xu_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c @@ -364,3 +364,111 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfdiv(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c @@ -373,3 +373,111 @@ vfloat64m8_t op2, size_t vl) { return vfmacc(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tu(vd, rs1, vs2, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfmacc_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c @@ -373,3 +373,111 @@ vfloat64m8_t op2, size_t vl) { return vfmadd(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c @@ -364,3 +364,111 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfmax(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_tama(mask, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c @@ -364,3 +364,111 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfmin(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32mf2_t test_vfmin_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c @@ -373,3 +373,111 @@ vfloat64m8_t op2, size_t vl) { return vfmsac(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return 
vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c @@ -373,3 +373,111 @@ vfloat64m8_t op2, size_t vl) { return vfmsub(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c @@ -364,3 +364,111 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfmul(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t 
merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c @@ -1,6 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> @@ -1008,3 +1009,435 @@ vfloat64m8_t src, size_t vl) { return vfncvt_rod_f(mask, maskedoff, src, vl); } + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return
vfncvt_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfncvt_xu_f_w_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_ta(vint32mf2_t src, size_t vl) { + return vfncvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_ta(vuint32mf2_t src, size_t vl) { + return vfncvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfncvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfncvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c @@ -374,3 +374,111 @@ double op1, vfloat64m8_t op2, size_t vl) { return vfnmacc(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return 
vfnmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t 
vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c @@ -374,3 +374,111 @@ double op1, vfloat64m8_t op2, size_t vl) { return vfnmadd(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c @@ -374,3 +374,111 @@ double op1, vfloat64m8_t op2, size_t vl) { return vfnmsac(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c @@ -374,3 +374,111 @@ double op1, vfloat64m8_t op2, size_t vl) { return vfnmsub(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c @@ -175,3 +175,57 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfrdiv(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c @@ -175,3 +175,57 @@ vfloat64m8_t op1, size_t vl) { return vfrec7(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_tu(merge, op1, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfrec7_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrec7_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c @@ -175,3 +175,57 @@ vfloat64m8_t op1, size_t vl) { return vfrsqrt7(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c @@ -175,3 +175,57 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfrsub(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfrsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c @@ -1086,3 +1086,327 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfsgnjx(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfsgnjn_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tamu(vbool64_t mask, 
vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c @@ -199,3 +199,57 @@ size_t vl) { return vfslide1down(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfslide1down_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c @@ -194,3 +194,57 @@ size_t vl) { return vfslide1up(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfslide1up_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c @@ -175,3 +175,57 @@ vfloat64m8_t op1, size_t vl) { return vfsqrt(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfsqrt_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c @@ -364,3 +364,111 @@ vfloat64m8_t op1, double op2, size_t vl) { return vfsub(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_tuma(mask, merge, op1, op2, vl); +} + 
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c @@ -324,3 +324,219 @@ vfloat64m8_t op1, float op2, size_t vl) { return vfwadd_wf(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return 
vfwadd_vf_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat64m1_t test_vfwadd_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c @@ -731,3 +731,381 @@ vfloat32m4_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); } + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t merge, vfloat32mf2_t src, 
size_t vl) { + return vfwcvt_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_ta(vint32mf2_t src, size_t vl) { + return vfwcvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_ta(vuint32mf2_t src, size_t vl) { + return vfwcvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_tuma(mask, merge, src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwcvt_f_f_v_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfwcvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m1_t test_vfwcvt_x_f_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c @@ -168,3 +168,111 @@ vfloat32m4_t op2, size_t vl) { return vfwmacc(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t 
vl) { + return vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return 
vfwmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c @@ -168,3 +168,111 @@ vfloat32m4_t op2, size_t vl) { return vfwmsac(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return 
vfwmsac_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c @@ -164,3 +164,111 @@ vfloat32m4_t op1, float op2, size_t vl) { return vfwmul(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c @@ -168,3 +168,111 @@ float op1, vfloat32m4_t op2, size_t vl) { return vfwnmacc(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c @@ -168,3 +168,111 @@ float op1, vfloat32m4_t op2, size_t vl) { return vfwnmsac(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c @@ -324,3 +324,219 @@ vfloat64m8_t op1, float op2, size_t vl) { return vfwsub_wf(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwsub_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t 
test_vfwsub_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c @@ 
-1587,3 +1587,219 @@ vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vmacc(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vmadd(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmaxu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tama( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vminu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c @@ -3531,3 +3531,543 @@ vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { return vmulhsu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, 
size_t vl) { + return vmulhu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_ta(vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tuma( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_tumu(mask, 
merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + 
return vmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tamu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c @@ -1188,3 +1188,219 @@ vuint64m8_t op1, size_t shift, size_t vl) { return vnclipu(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint32mf2_t test_vnclip_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vnmsac(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vnmsac_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tumu(mask, vd, 
vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + 
return vnmsac_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vnmsub(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c @@ -543,3 +543,111 @@ vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { return vnsra(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) { + return 
vnsra_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tamu(vbool64_t 
mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c @@ -543,3 +543,111 @@ vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vnsrl(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_tumu(mask, merge, op1, shift, vl); 
+} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vor(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vremu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vremu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c @@ -3208,3 +3208,489 @@ size_t vl) { return vrgatherei16(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tu(merge, 
op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_ta(vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint32mf2_t test_vrgather_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_ta(vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_ta(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_ta(vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_ta(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_ta(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_ta(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( 
[[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, 
size_t vl) { + return vrgather_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( 
[[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -795,3 +795,111 @@ vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vrsub(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( 
[[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vsaddu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c @@ -883,3 +883,75 @@ vbool8_t borrowin, size_t vl) { return vsbc(op1, op2, borrowin, vl); } + +// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_ta(op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_ta(op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_ta(op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t 
op2, vbool64_t borrowin, size_t vl) { + return vsbc_ta(op1, op2, borrowin, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c @@ -535,3 +535,57 @@ vint32m4_t op1, size_t vl) { return vsext_vf2(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_ta(vint32mf2_t op1, size_t vl) { + return vsext_vf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vsext_vf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c @@ -903,3 +903,111 @@ size_t vl) { return vslide1down(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return 
vslide1down_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t 
value, size_t vl) { + return vslide1down_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down_tamu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c @@ -878,3 +878,111 @@ size_t vl) { return vslide1up(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_tamu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t 
value, size_t vl) { + return vslide1up_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c @@ -1109,3 +1109,165 @@ size_t vl) { return vslidedown(mask, dst, src, offset, vl); } + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tuma( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, merge, src, offset, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c @@ -1094,3 +1094,165 @@ size_t vl) { return vslideup(mask, dst, src, offset, vl); } + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_ta(vint32mf2_t 
dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_tamu(mask, merge, src, offset, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vsll(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tama( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c @@ -690,3 +690,111 @@ vint32m8_t op1, int32_t op2, size_t vl) { return vsmul(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_tama(mask, 
op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c @@ -795,3 +795,111 @@ vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { return vsra(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c @@ -795,3 +795,111 @@ vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vsrl(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { 
+ return vsrl_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c @@ -845,3 +845,111 @@ vint64m8_t op1, size_t shift, size_t vl) { return vssra(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c @@ -860,3 +860,111 @@ vuint64m8_t op1, size_t shift, size_t vl) { return vssrl(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: 
@test_vssrl_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vssubu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vsub(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c @@ -2319,3 +2319,435 @@ vuint64m8_t op1, uint32_t op2, size_t vl) { return vwaddu_wx(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t merge, vint64m1_t op1, vint32mf2_t 
op2, size_t vl) { + return vwadd_wv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_ta(vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_ta(vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_ta(op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_ta(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_ta(vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c @@ -2131,3 +2131,381 @@ vint32m4_t op2, size_t vl) { return vwmaccus(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_ta(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_ta(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_ta(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tuma(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tuma(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tuma(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tuma(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tuma(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_tumu(mask, vd, rs1, vs2, 
vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tama(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tama(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tama(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tama(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tama(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tama(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tama(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tamu(vbool64_t mask, vint64m1_t vd, 
vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tamu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tamu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tamu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tamu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c @@ -1623,3 +1623,327 @@ vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { return vwmulsu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vwmulu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_ta(vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t 
op1, uint32_t op2, size_t vl) { + return vwmulsu_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m1_t test_vwmul_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vwmulu_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c @@ -2319,3 +2319,435 @@ vuint64m8_t op1, uint32_t op2, size_t vl) { return vwsubu_wx(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_tu (vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_tu (vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_tu (vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_tu (vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_tu (vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_tu (vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_ta (vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_ta (vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_ta (vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_ta (vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_ta (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_ta (vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_ta (vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_ta (vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( 
[[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vwsubu_wv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_tama (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_tama (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_tama (vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_tama (vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_tama (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_tama (vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vwsubu_wv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vxor(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vxor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vxor_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c @@ -535,3 +535,57 @@ vuint32m4_t op1, size_t vl) { return vzext_vf2(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, size_t vl) { + return vzext_vf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_ta(vuint32mf2_t op1, size_t vl) { + return vzext_vf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, size_t vl) { + return vzext_vf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, size_t vl) { + return vzext_vf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) { + return vzext_vf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, size_t vl) { + return vzext_vf2_tamu(mask, merge, op1, vl); +} diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vaaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tuma( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t 
merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vaaddu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vaaddu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c @@ -883,3 +883,75 @@ size_t vl) { return vadc_vxm_u64m8(op1, op2, carryin, vl); } + +// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_vvm_i32mf2_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { + return vadc_vxm_i32mf2_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_vvm_u32mf2_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { + return vadc_vxm_u32mf2_tu(merge, op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_vvm_i32mf2_ta(op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { + return vadc_vxm_i32mf2_ta(op1, op2, carryin, vl); +} + +// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { + return vadc_vvm_u32mf2_ta(op1, op2, carryin, vl); +} + +// 
CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { + return vadc_vxm_u32mf2_ta(op1, op2, carryin, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vand_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2_ta(op1, op2, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return 
vand_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return 
vand_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vasubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_ta(vuint32mf2_t op1, 
vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vasubu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c @@ -11,7 +11,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfadd_vv_f16mf4(op1, op2, vl); } @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16mf4(op1, op2, vl); } @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfadd_vv_f16mf2(op1, op2, vl); } @@ -38,7 +38,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16mf2(op1, op2, vl); } @@ -47,7 +47,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfadd_vv_f16m1(op1, op2, vl); } @@ -56,7 +56,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m1(op1, op2, vl); } @@ -65,7 +65,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfadd_vv_f16m2(op1, op2, vl); } @@ -74,7 +74,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m2(op1, op2, vl); } @@ -83,7 +83,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfadd_vv_f16m4(op1, op2, vl); } @@ -92,7 +92,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m4(op1, op2, vl); } @@ -101,7 +101,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfadd_vv_f16m8(op1, op2, vl); } @@ -110,7 +110,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m8(op1, op2, vl); } @@ -119,7 +119,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd_vv_f32mf2(op1, op2, vl); } @@ -128,7 +128,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { return vfadd_vf_f32mf2(op1, op2, vl); } @@ -137,7 +137,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd_vv_f32m1(op1, op2, vl); } @@ -146,7 +146,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float 
op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { return vfadd_vf_f32m1(op1, op2, vl); } @@ -155,7 +155,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd_vv_f32m2(op1, op2, vl); } @@ -164,7 +164,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { return vfadd_vf_f32m2(op1, op2, vl); } @@ -173,7 +173,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfadd_vv_f32m4(op1, op2, vl); } @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { return vfadd_vf_f32m4(op1, op2, vl); } @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd_vv_f32m8(op1, op2, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { return vfadd_vf_f32m8(op1, op2, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd_vv_f64m1(op1, op2, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { return vfadd_vf_f64m1(op1, op2, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, 
size_t vl) { return vfadd_vv_f64m2(op1, op2, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { return vfadd_vf_f64m2(op1, op2, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd_vv_f64m4(op1, op2, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { return vfadd_vf_f64m4(op1, op2, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfadd_vv_f64m8(op1, op2, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfadd_vf_f64m8(op1, op2, vl); } @@ -281,7 +281,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfadd_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -299,7 +299,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return 
vfadd_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -308,7 +308,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -317,7 +317,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfadd_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -326,7 +326,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -335,7 +335,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfadd_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -344,7 +344,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -353,7 +353,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfadd_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -362,7 +362,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, 
size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfadd_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfadd_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } @@ -542,6 +542,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t 
vl) { return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfadd_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfadd_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfadd_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfclass_v_u16mf4 (vfloat16mf4_t op1, size_t vl) { +vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) { return vfclass_v_u16mf4(op1, vl); } @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfclass_v_u16mf2 (vfloat16mf2_t op1, size_t vl) { +vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) { return vfclass_v_u16mf2(op1, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfclass_v_u16m1 (vfloat16m1_t op1, size_t vl) { +vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) { return vfclass_v_u16m1(op1, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfclass_v_u16m2 (vfloat16m2_t op1, size_t vl) { +vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) { return vfclass_v_u16m2(op1, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfclass_v_u16m4 (vfloat16m4_t 
op1, size_t vl) { +vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) { return vfclass_v_u16m4(op1, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfclass_v_u16m8 (vfloat16m8_t op1, size_t vl) { +vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) { return vfclass_v_u16m8(op1, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfclass_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { +vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { return vfclass_v_u16mf4_m(mask, maskedoff, op1, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfclass_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { +vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { return vfclass_v_u16mf2_m(mask, maskedoff, op1, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfclass_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { +vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { return vfclass_v_u16m1_m(mask, maskedoff, op1, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfclass_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { +vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { return vfclass_v_u16m2_m(mask, maskedoff, op1, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfclass_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { +vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { return vfclass_v_u16m4_m(mask, maskedoff, op1, vl); } @@ -281,6 +281,60 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfclass_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { +vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { return vfclass_v_u16m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vfclass_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c @@ -1039,7 +1039,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { return vfcvt_x_f_v_i16mf4(src, vl); } @@ -1048,7 +1048,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) { +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf4(src, vl); } @@ -1057,7 +1057,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { return vfcvt_x_f_v_i16mf2(src, vl); } @@ -1066,7 +1066,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) { +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf2(src, vl); } @@ -1075,7 +1075,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { return vfcvt_x_f_v_i16m1(src, vl); } @@ -1084,7 +1084,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) { +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m1(src, vl); } @@ -1093,7 +1093,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { return vfcvt_x_f_v_i16m2(src, vl); } @@ -1102,7 +1102,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) { +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m2(src, vl); } @@ -1111,7 +1111,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { return vfcvt_x_f_v_i16m4(src, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) { +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m4(src, vl); } @@ -1129,7 +1129,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { return vfcvt_x_f_v_i16m8(src, vl); } @@ -1138,7 +1138,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) { +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m8(src, vl); } @@ -1147,7 +1147,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { return vfcvt_xu_f_v_u16mf4(src, vl); } @@ -1156,7 +1156,7 
@@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) { +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf4(src, vl); } @@ -1165,7 +1165,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { return vfcvt_xu_f_v_u16mf2(src, vl); } @@ -1174,7 +1174,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) { +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf2(src, vl); } @@ -1183,7 +1183,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { return vfcvt_xu_f_v_u16m1(src, vl); } @@ -1192,7 +1192,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) { +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m1(src, vl); } @@ -1201,7 +1201,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { return vfcvt_xu_f_v_u16m2(src, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m2(src, vl); } @@ -1219,7 +1219,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { return vfcvt_xu_f_v_u16m4(src, vl); } @@ -1228,7 +1228,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m4(src, vl); } @@ -1237,7 +1237,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { +vuint16m8_t 
test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { return vfcvt_xu_f_v_u16m8(src, vl); } @@ -1246,7 +1246,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m8(src, vl); } @@ -1255,7 +1255,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) { return vfcvt_f_x_v_f16mf4(src, vl); } @@ -1264,7 +1264,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) { return vfcvt_f_x_v_f16mf2(src, vl); } @@ -1273,7 +1273,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) { return vfcvt_f_x_v_f16m1(src, vl); } @@ -1282,7 +1282,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) { return vfcvt_f_x_v_f16m2(src, vl); } @@ -1291,7 +1291,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) { return vfcvt_f_x_v_f16m4(src, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) { return vfcvt_f_x_v_f16m8(src, vl); } @@ -1309,7 +1309,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) { return vfcvt_f_xu_v_f16mf4(src, vl); } @@ -1318,7 +1318,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) { return vfcvt_f_xu_v_f16mf2(src, vl); } @@ -1327,7 +1327,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) { 
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) { return vfcvt_f_xu_v_f16m1(src, vl); } @@ -1336,7 +1336,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) { return vfcvt_f_xu_v_f16m2(src, vl); } @@ -1345,7 +1345,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) { return vfcvt_f_xu_v_f16m4(src, vl); } @@ -1354,7 +1354,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) { return vfcvt_f_xu_v_f16m8(src, vl); } @@ -1363,7 +1363,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_x_f_v_i16mf4_m(mask, maskedoff, src, vl); } @@ -1372,7 +1372,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf4_m(mask, maskedoff, src, vl); } @@ -1381,7 +1381,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_x_f_v_i16mf2_m(mask, maskedoff, src, vl); } @@ -1390,7 +1390,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf2_m(mask, maskedoff, src, vl); } @@ -1399,7 +1399,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { +vint16m1_t 
test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl); } @@ -1408,7 +1408,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m1_m(mask, maskedoff, src, vl); } @@ -1417,7 +1417,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl); } @@ -1426,7 +1426,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m2_m(mask, maskedoff, src, vl); } @@ -1435,7 +1435,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl); } @@ -1444,7 +1444,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m4_m(mask, maskedoff, src, vl); } @@ -1453,7 +1453,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { return vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl); } @@ -1462,7 +1462,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { 
return vfcvt_rtz_x_f_v_i16m8_m(mask, maskedoff, src, vl); } @@ -1471,7 +1471,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); } @@ -1480,7 +1480,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); } @@ -1489,7 +1489,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); } @@ -1498,7 +1498,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); } @@ -1507,7 +1507,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl); } @@ -1516,7 +1516,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m1_m(mask, maskedoff, src, vl); } @@ -1525,7 +1525,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return 
vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl); } @@ -1534,7 +1534,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m2_m(mask, maskedoff, src, vl); } @@ -1543,7 +1543,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl); } @@ -1552,7 +1552,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m4_m(mask, maskedoff, src, vl); } @@ -1561,7 +1561,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { return vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl); } @@ -1570,7 +1570,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m8_m(mask, maskedoff, src, vl); } @@ -1579,7 +1579,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { return vfcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1588,7 +1588,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { return vfcvt_f_x_v_f16mf2_m(mask, maskedoff, 
src, vl); } @@ -1597,7 +1597,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { return vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1606,7 +1606,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { return vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1615,7 +1615,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { return vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1624,7 +1624,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { return vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); } @@ -1633,7 +1633,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { return vfcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1642,7 +1642,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { return vfcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); } @@ -1651,7 +1651,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1660,7 +1660,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { return vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1669,7 +1669,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { return vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1678,6 +1678,330 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { return vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); } + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_ta(vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_ta(vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_ta(vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( 
[[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfdiv_vv_f16mf4(op1, op2, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16mf4(op1, op2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfdiv_vv_f16mf2(op1, op2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16mf2(op1, op2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfdiv_vv_f16m1(op1, op2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m1(op1, op2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfdiv_vv_f16m2(op1, op2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m2(op1, op2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfdiv_vv_f16m4(op1, op2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m4(op1, op2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfdiv_vv_f16m8(op1, op2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m8(op1, op2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfdiv_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfdiv_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -533,7 +533,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -578,6 +578,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_vv_f32mf2_tu(merge, op1, op2, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfdiv_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfdiv_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfdiv_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmacc_vv_f16mf4(vd, vs1, vs2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfmacc_vf_f16mf4(vd, rs1, vs2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmacc_vv_f16mf2(vd, vs1, vs2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmacc_vf_f16mf2(vd, rs1, vs2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmacc_vv_f16m1 
(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmacc_vv_f16m1(vd, vs1, vs2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmacc_vf_f16m1(vd, rs1, vs2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfmacc_vv_f16m2(vd, vs1, vs2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmacc_vf_f16m2(vd, rs1, vs2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfmacc_vv_f16m4(vd, vs1, vs2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmacc_vf_f16m4(vd, rs1, vs2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmacc_vv_f16m8(vd, vs1, vs2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmacc_vf_f16m8(vd, rs1, vs2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmacc_vv_f16mf4_m 
(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t 
test_vfmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -578,7 +578,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -587,6 +587,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vf_f32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t 
vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmacc_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmadd_vv_f16mf4(vd, vs1, vs2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfmadd_vf_f16mf4(vd, rs1, vs2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmadd_vv_f16mf2(vd, vs1, vs2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmadd_vf_f16mf2(vd, rs1, vs2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmadd_vv_f16m1(vd, vs1, vs2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmadd_vf_f16m1(vd, rs1, vs2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfmadd_vv_f16m2(vd, vs1, vs2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmadd_vf_f16m2(vd, rs1, vs2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfmadd_vv_f16m4(vd, vs1, vs2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmadd_vf_f16m4(vd, rs1, vs2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmadd_vv_f16m8(vd, vs1, vs2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmadd_vf_f16m8(vd, rs1, vs2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, 
size_t vl) { return vfmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t 
vl) { return vfmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -578,7 +578,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -587,6 +587,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfmadd_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vf_f32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmadd_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfmax_vv_f16mf4(op1, op2, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16mf4(op1, op2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfmax_vv_f16mf2(op1, op2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16mf2(op1, op2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfmax_vv_f16m1(op1, op2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m1(op1, op2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfmax_vv_f16m2(op1, op2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m2(op1, op2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) { return vfmax_vv_f16m4(op1, op2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m4(op1, op2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfmax_vv_f16m8(op1, op2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m8(op1, op2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfmax_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmax_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfmax_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmax_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfmax_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -578,6 +578,114 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tumu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmax_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmax_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmax_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfmin_vv_f16mf4(op1, op2, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16mf4(op1, op2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, 
vfloat16mf2_t op2, size_t vl) { return vfmin_vv_f16mf2(op1, op2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16mf2(op1, op2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfmin_vv_f16m1(op1, op2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m1(op1, op2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfmin_vv_f16m2(op1, op2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m2(op1, op2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfmin_vv_f16m4(op1, op2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m4(op1, op2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfmin_vv_f16m8(op1, op2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m8(op1, op2, 
vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfmin_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfmin_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t 
vl) { +vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -578,6 +578,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t merge, 
vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return 
vfmin_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmin_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmin_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmin_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmsac_vv_f16mf4(vd, vs1, vs2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfmsac_vf_f16mf4(vd, rs1, vs2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmsac_vv_f16mf2(vd, vs1, vs2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmsac_vf_f16mf2(vd, rs1, vs2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmsac_vv_f16m1(vd, vs1, vs2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f16.f16.i64( 
[[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmsac_vf_f16m1(vd, rs1, vs2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfmsac_vv_f16m2(vd, vs1, vs2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmsac_vf_f16m2(vd, rs1, vs2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfmsac_vv_f16m4(vd, vs1, vs2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmsac_vf_f16m4(vd, rs1, vs2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmsac_vv_f16m8(vd, vs1, vs2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmsac_vf_f16m8(vd, rs1, vs2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -578,7 +578,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -587,6 +587,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, 
vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vf_f32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsac_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmsub_vv_f16mf4(vd, vs1, vs2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfmsub_vf_f16mf4(vd, rs1, vs2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmsub_vv_f16mf2(vd, vs1, vs2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmsub_vf_f16mf2(vd, rs1, vs2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmsub_vv_f16m1(vd, vs1, vs2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmsub_vf_f16m1(vd, rs1, vs2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return 
vfmsub_vv_f16m2(vd, vs1, vs2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmsub_vf_f16m2(vd, rs1, vs2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfmsub_vv_f16m4(vd, vs1, vs2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmsub_vf_f16m4(vd, rs1, vs2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmsub_vv_f16m8(vd, vs1, vs2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmsub_vf_f16m8(vd, rs1, vs2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t 
vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 
rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -578,7 +578,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -587,6 +587,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vf_f32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfmsub_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t 
test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfmul_vv_f16mf4(op1, op2, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16mf4(op1, op2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfmul_vv_f16mf2(op1, op2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16mf2(op1, op2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfmul_vv_f16m1(op1, op2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16m1(op1, op2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfmul_vv_f16m2(op1, op2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16m2(op1, op2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfmul_vv_f16m4(op1, op2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, 
_Float16 op2, size_t vl) { return vfmul_vf_f16m4(op1, op2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfmul_vv_f16m8(op1, op2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16m8(op1, op2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfmul_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfmul_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -524,7 +524,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -578,6 +578,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t 
maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_tumu(mask, merge, 
op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfmul_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmul_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfmul_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c @@ -1015,7 +1015,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { return vfncvt_x_f_w_i8mf8(src, vl); } @@ -1024,7 +1024,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf8(src, vl); } @@ -1033,7 +1033,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { return vfncvt_x_f_w_i8mf4(src, vl); } @@ -1042,7 +1042,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf4(src, vl); } @@ -1051,7 +1051,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { return vfncvt_x_f_w_i8mf2(src, vl); } @@ -1060,7 +1060,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf2(src, vl); } @@ -1069,7 +1069,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { return vfncvt_x_f_w_i8m1(src, vl); } @@ -1078,7 +1078,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m1(src, vl); } @@ -1087,7 +1087,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { return vfncvt_x_f_w_i8m2(src, vl); } @@ -1096,7 +1096,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m2(src, vl); } @@ -1105,7 +1105,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { return vfncvt_x_f_w_i8m4(src, vl); } @@ -1114,7 +1114,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m4(src, vl); } @@ -1123,7 +1123,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { return vfncvt_xu_f_w_u8mf8(src, vl); } @@ -1132,7 +1132,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf8(src, vl); } @@ -1141,7 +1141,7 @@ 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { return vfncvt_xu_f_w_u8mf4(src, vl); } @@ -1150,7 +1150,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf4(src, vl); } @@ -1159,7 +1159,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { return vfncvt_xu_f_w_u8mf2(src, vl); } @@ -1168,7 +1168,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf2(src, vl); } @@ -1177,7 +1177,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { return vfncvt_xu_f_w_u8m1(src, vl); } @@ -1186,7 +1186,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m1(src, vl); } @@ -1195,7 +1195,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { return vfncvt_xu_f_w_u8m2(src, vl); } @@ -1204,7 +1204,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m2(src, vl); } @@ -1213,7 +1213,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { return vfncvt_xu_f_w_u8m4(src, vl); } @@ -1222,7 +1222,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { +vuint8m4_t 
test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m4(src, vl); } @@ -1231,7 +1231,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) { return vfncvt_f_x_w_f16mf4(src, vl); } @@ -1240,7 +1240,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) { return vfncvt_f_x_w_f16mf2(src, vl); } @@ -1249,7 +1249,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) { return vfncvt_f_x_w_f16m1(src, vl); } @@ -1258,7 +1258,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) { return vfncvt_f_x_w_f16m2(src, vl); } @@ -1267,7 +1267,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) { return vfncvt_f_x_w_f16m4(src, vl); } @@ -1276,7 +1276,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) { return vfncvt_f_xu_w_f16mf4(src, vl); } @@ -1285,7 +1285,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) { return vfncvt_f_xu_w_f16mf2(src, vl); } @@ -1294,7 +1294,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) { return vfncvt_f_xu_w_f16m1(src, vl); } @@ -1303,7 +1303,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) { return vfncvt_f_xu_w_f16m2(src, vl); } @@ -1312,7 +1312,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_xu_w_f16m4 
(vuint32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) { return vfncvt_f_xu_w_f16m4(src, vl); } @@ -1321,7 +1321,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_f_f_w_f16mf4(src, vl); } @@ -1330,7 +1330,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16mf4(src, vl); } @@ -1339,7 +1339,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_f_f_w_f16mf2(src, vl); } @@ -1348,7 +1348,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_rod_f_f_w_f16mf2(src, vl); } @@ -1357,7 +1357,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { return vfncvt_f_f_w_f16m1(src, vl); } @@ -1366,7 +1366,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m1(src, vl); } @@ -1375,7 +1375,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { return vfncvt_f_f_w_f16m2(src, vl); } @@ -1384,7 +1384,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m2(src, vl); } @@ -1393,7 +1393,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { return vfncvt_f_f_w_f16m4(src, vl); } @@ -1402,7 +1402,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m4(src, vl); } @@ -1411,7 +1411,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_x_f_w_i8mf8_m(mask, maskedoff, src, vl); } @@ -1420,7 +1420,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf8_m(mask, maskedoff, src, vl); } @@ -1429,7 +1429,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_x_f_w_i8mf4_m(mask, maskedoff, src, vl); } @@ -1438,7 +1438,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf4_m(mask, maskedoff, src, vl); } @@ -1447,7 +1447,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_x_f_w_i8mf2_m(mask, maskedoff, src, vl); } @@ -1456,7 +1456,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf2_m(mask, maskedoff, src, vl); } @@ -1465,7 +1465,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t 
test_vfncvt_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl); } @@ -1474,7 +1474,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m1_m(mask, maskedoff, src, vl); } @@ -1483,7 +1483,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl); } @@ -1492,7 +1492,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m2_m(mask, maskedoff, src, vl); } @@ -1501,7 +1501,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl); } @@ -1510,7 +1510,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m4_m(mask, maskedoff, src, vl); } @@ -1519,7 +1519,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); } @@ -1528,7 +1528,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { 
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); } @@ -1537,7 +1537,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); } @@ -1546,7 +1546,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); } @@ -1555,7 +1555,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); } @@ -1564,7 +1564,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); } @@ -1573,7 +1573,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl); } @@ -1582,7 +1582,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m1_m(mask, maskedoff, src, vl); } @@ -1591,7 +1591,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { +vuint8m2_t 
test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl); } @@ -1600,7 +1600,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m2_m(mask, maskedoff, src, vl); } @@ -1609,7 +1609,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl); } @@ -1618,7 +1618,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m4_m(mask, maskedoff, src, vl); } @@ -1627,7 +1627,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { return vfncvt_f_x_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1636,7 +1636,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { return vfncvt_f_x_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1645,7 +1645,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_x_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { return vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1654,7 +1654,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_x_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vfloat16m2_t 
maskedoff, vint32m4_t src, size_t vl) { return vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl); } @@ -1663,7 +1663,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_x_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { return vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl); } @@ -1672,7 +1672,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { return vfncvt_f_xu_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1681,7 +1681,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { return vfncvt_f_xu_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1690,7 +1690,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { return vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1699,7 +1699,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { return vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl); } @@ -1708,7 +1708,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { return vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl); } @@ -1717,7 +1717,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { 
return vfncvt_f_f_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1726,7 +1726,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1735,7 +1735,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_f_f_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1744,7 +1744,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_rod_f_f_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1753,7 +1753,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1762,7 +1762,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1771,7 +1771,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl); } @@ -1780,7 +1780,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, 
size_t vl) { return vfncvt_rod_f_f_w_f16m2_m(mask, maskedoff, src, vl); } @@ -1789,7 +1789,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl); } @@ -1798,6 +1798,438 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m4_m(mask, maskedoff, src, vl); } + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfncvt_f_f_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_ta(vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_ta(vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( undef, 
[[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_ta(vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tuma(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_tumu(mask, merge, src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_x_f_w_i16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_x_f_w_i16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_xu_f_w_u16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rtz_xu_f_w_u16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vint32mf2_t src, size_t vl) { + return vfncvt_f_x_w_f16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vfncvt_f_xu_w_f16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_f_f_w_f16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfncvt_rod_f_f_w_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tamu(vbool64_t mask, vfloat16mf4_t merge, vfloat32mf2_t src, size_t vl) { + return vfncvt_rod_f_f_w_f16mf4_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmacc_vv_f16mf4(vd, vs1, vs2, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmacc_vf_f16mf4(vd, rs1, vs2, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmacc_vv_f16mf2(vd, vs1, vs2, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmacc_vf_f16mf2(vd, rs1, vs2, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmacc_vv_f16m1(vd, vs1, vs2, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmacc_vf_f16m1(vd, rs1, vs2, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmacc_vv_f16m2(vd, vs1, vs2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmacc_vf_f16m2(vd, rs1, vs2, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmacc_vv_f16m4(vd, vs1, vs2, vl); } @@ -462,7 +462,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmacc_vf_f16m4(vd, rs1, vs2, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmacc_vv_f16m8(vd, vs1, vs2, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmacc_vf_f16m8(vd, rs1, vs2, vl); } @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -498,7 +498,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -507,7 
+507,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -516,7 +516,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -525,7 +525,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -534,7 +534,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -552,7 +552,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -561,7 +561,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmacc_vv_f16m4_m(mask, vd, vs1, vs2, 
vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -579,7 +579,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -588,6 +588,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tuma(vbool64_t mask, 
vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vf_f32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmacc_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmadd_vv_f16mf4(vd, vs1, vs2, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmadd_vf_f16mf4(vd, rs1, vs2, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmadd_vv_f16mf2(vd, vs1, vs2, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmadd_vf_f16mf2(vd, rs1, vs2, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmadd_vv_f16m1(vd, vs1, vs2, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmadd_vf_f16m1(vd, rs1, vs2, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmadd_vv_f16m2(vd, vs1, vs2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t 
vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmadd_vf_f16m2(vd, rs1, vs2, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmadd_vv_f16m4(vd, vs1, vs2, vl); } @@ -462,7 +462,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmadd_vf_f16m4(vd, rs1, vs2, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmadd_vv_f16m8(vd, vs1, vs2, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmadd_vf_f16m8(vd, rs1, vs2, vl); } @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -498,7 +498,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -507,7 +507,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -516,7 +516,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -525,7 +525,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -534,7 +534,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -552,7 +552,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -561,7 +561,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -579,7 +579,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -588,6 +588,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vf_f32mf2_tuma(mask, vd, 
rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmadd_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsac_vv_f16mf4(vd, vs1, vs2, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsac_vf_f16mf4(vd, rs1, vs2, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsac_vv_f16mf2(vd, vs1, vs2, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsac_vf_f16mf2(vd, rs1, vs2, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmsac_vv_f16m1(vd, vs1, vs2, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmsac_vf_f16m1(vd, rs1, vs2, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmsac_vv_f16m2(vd, vs1, vs2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmsac_vf_f16m2(vd, rs1, vs2, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmsac_vv_f16m4(vd, vs1, vs2, vl); } @@ 
-462,7 +462,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmsac_vf_f16m4(vd, rs1, vs2, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmsac_vv_f16m8(vd, vs1, vs2, vl); } @@ -480,7 +480,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmsac_vf_f16m8(vd, rs1, vs2, vl); } @@ -489,7 +489,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -498,7 +498,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -507,7 +507,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -516,7 +516,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -525,7 +525,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -534,7 +534,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -543,7 +543,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -552,7 +552,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -561,7 +561,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); } @@ -570,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -579,7 +579,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -588,6 +588,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vf_f32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsac_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c @@ -381,7 +381,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsub_vv_f16mf4(vd, vs1, vs2, vl); } @@ -390,7 +390,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsub_vf_f16mf4(vd, rs1, vs2, vl); } @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vfloat16mf2_t test_vfnmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsub_vv_f16mf2(vd, vs1, vs2, vl); } @@ -408,7 +408,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsub_vf_f16mf2(vd, rs1, vs2, vl); } @@ -417,7 +417,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmsub_vv_f16m1(vd, vs1, vs2, vl); } @@ -426,7 +426,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmsub_vf_f16m1(vd, rs1, vs2, vl); } @@ -435,7 +435,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmsub_vv_f16m2(vd, vs1, vs2, vl); } @@ -444,7 +444,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmsub_vf_f16m2(vd, rs1, vs2, vl); } @@ -453,7 +453,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmsub_vv_f16m4(vd, vs1, vs2, vl); } @@ -462,7 +462,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmsub_vf_f16m4(vd, rs1, vs2, vl); } @@ -471,7 +471,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 
0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmsub_vv_f16m8(vd, vs1, vs2, vl); } @@ -480,22 +480,16 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmsub_vf_f16m8(vd, rs1, vs2, vl); } - - - - - - // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } @@ -504,7 +498,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfnmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { return vfnmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } @@ -513,7 +507,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } @@ -522,7 +516,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfnmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { return vfnmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } @@ -531,7 +525,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfnmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); } @@ -540,7 +534,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfnmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { return vfnmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); } @@ -549,7 +543,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfnmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); } @@ -558,7 +552,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfnmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { return vfnmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); } @@ -567,7 +561,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfnmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); } @@ -576,7 +570,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfnmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { return vfnmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); } @@ -585,7 +579,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return vfnmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); } @@ -594,6 +588,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64( [[VD:%.*]], half [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfnmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { return vfnmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_ta(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vv_f32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_ta(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vf_f32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vv_f32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vf_f32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vv_f32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vf_f32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vv_f32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[VD:%.*]], float [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { + return vfnmsub_vf_f32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16mf4(op1, op2, vl); } @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16mf2(op1, op2, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m1(op1, op2, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m2(op1, op2, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrdiv.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m4(op1, op2, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m8(op1, op2, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -281,6 +281,60 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfrdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrdiv_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { return vfrec7_v_f16mf4(op1, vl); } @@ -191,7 +191,7 
@@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { return vfrec7_v_f16mf2(op1, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) { return vfrec7_v_f16m1(op1, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) { return vfrec7_v_f16m2(op1, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) { return vfrec7_v_f16m4(op1, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) { return vfrec7_v_f16m8(op1, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrec7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { return vfrec7_v_f16mf4_m(mask, maskedoff, op1, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrec7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { return vfrec7_v_f16mf2_m(mask, maskedoff, op1, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrec7_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { return vfrec7_v_f16m1_m(mask, maskedoff, op1, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrec7_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { return vfrec7_v_f16m2_m(mask, maskedoff, op1, vl); } @@ -272,7 +272,7 @@ 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrec7_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { return vfrec7_v_f16m4_m(mask, maskedoff, op1, vl); } @@ -281,6 +281,60 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrec7_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { return vfrec7_v_f16m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c @@ -182,7 +182,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { +vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { return vfrsqrt7_v_f16mf4(op1, vl); } @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { +vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { return vfrsqrt7_v_f16mf2(op1, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) { +vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) { return vfrsqrt7_v_f16m1(op1, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) { +vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) { return vfrsqrt7_v_f16m2(op1, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) { +vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) { return vfrsqrt7_v_f16m4(op1, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl) { +vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) { return vfrsqrt7_v_f16m8(op1, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { return vfrsqrt7_v_f16mf4_m(mask, maskedoff, op1, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { return vfrsqrt7_v_f16mf2_m(mask, maskedoff, op1, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsqrt7_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { +vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { return vfrsqrt7_v_f16m1_m(mask, maskedoff, op1, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsqrt7_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { +vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { return vfrsqrt7_v_f16m2_m(mask, maskedoff, op1, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsqrt7_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { +vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { return vfrsqrt7_v_f16m4_m(mask, maskedoff, op1, vl); } @@ -281,6 +281,60 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsqrt7_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { +vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { return vfrsqrt7_v_f16m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MERGE:%.*]], 
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16mf4(op1, op2, vl); } @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16mf2(op1, op2, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m1(op1, op2, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m2(op1, op2, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m4(op1, op2, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m8(op1, op2, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -281,6 +281,60 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfrsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfrsub_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c @@ -1093,7 +1093,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsgnj_vv_f16mf4(op1, op2, vl); } @@ -1102,7 +1102,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16mf4(op1, op2, vl); } @@ -1111,7 +1111,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsgnj_vv_f16mf2(op1, op2, vl); } @@ -1120,7 +1120,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16mf2(op1, op2, vl); } @@ -1129,7 +1129,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsgnj_vv_f16m1(op1, op2, vl); } @@ -1138,7 +1138,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m1(op1, op2, vl); } @@ -1147,7 +1147,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsgnj_vv_f16m2(op1, op2, vl); } @@ -1156,7 +1156,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m2(op1, op2, vl); } @@ -1165,7 +1165,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsgnj_vv_f16m4(op1, op2, vl); } @@ -1174,7 +1174,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m4(op1, op2, vl); } @@ -1183,7 +1183,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsgnj_vv_f16m8(op1, op2, vl); } @@ -1192,7 +1192,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m8(op1, op2, vl); } @@ -1201,7 +1201,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsgnjn_vv_f16mf4(op1, op2, vl); } @@ -1210,7 +1210,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16mf4(op1, op2, vl); } @@ -1219,7 +1219,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsgnjn_vv_f16mf2(op1, op2, vl); } @@ -1228,7 +1228,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16mf2(op1, op2, vl); } @@ -1237,7 +1237,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsgnjn_vv_f16m1(op1, op2, vl); } @@ -1246,7 +1246,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m1(op1, op2, vl); } @@ -1255,7 +1255,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsgnjn_vv_f16m2(op1, op2, vl); } @@ -1264,7 +1264,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m2(op1, op2, vl); } @@ -1273,7 +1273,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsgnjn_vv_f16m4(op1, op2, vl); } @@ -1282,7 +1282,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m4(op1, op2, vl); } @@ -1291,7 +1291,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsgnjn_vv_f16m8(op1, op2, vl); } @@ -1300,7 +1300,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m8(op1, op2, vl); } @@ -1309,7 +1309,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsgnjx_vv_f16mf4(op1, op2, vl); } @@ -1318,7 +1318,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16mf4(op1, op2, vl); } @@ -1327,7 +1327,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsgnjx_vv_f16mf2(op1, op2, vl); } @@ -1336,7 +1336,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16mf2(op1, op2, vl); } @@ -1345,7 +1345,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsgnjx_vv_f16m1(op1, op2, vl); } @@ -1354,7 +1354,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m1(op1, op2, vl); } @@ -1363,7 +1363,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsgnjx_vv_f16m2(op1, op2, vl); } @@ -1372,7 +1372,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m2(op1, op2, vl); } @@ -1381,7 +1381,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsgnjx_vv_f16m4(op1, op2, vl); } @@ -1390,7 +1390,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m4(op1, op2, vl); } @@ -1399,7 +1399,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsgnjx_vv_f16m8(op1, op2, vl); } @@ -1408,7 +1408,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m8(op1, op2, vl); } @@ -1417,7 +1417,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsgnj_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -1426,7 +1426,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -1435,7 +1435,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsgnj_vv_f16mf2_m(mask, maskedoff, op1, op2, 
vl); } @@ -1444,7 +1444,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -1453,7 +1453,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsgnj_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -1462,7 +1462,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -1471,7 +1471,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsgnj_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -1480,7 +1480,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -1489,7 +1489,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsgnj_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -1498,7 +1498,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, 
size_t vl) { +vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -1507,7 +1507,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsgnj_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -1516,7 +1516,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsgnj_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -1525,7 +1525,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsgnjn_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -1534,7 +1534,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -1543,7 +1543,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsgnjn_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -1552,7 +1552,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -1561,7 +1561,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsgnjn_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -1570,7 +1570,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -1579,7 +1579,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsgnjn_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -1588,7 +1588,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -1597,7 +1597,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsgnjn_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -1606,7 +1606,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -1615,7 +1615,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t 
test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsgnjn_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -1624,7 +1624,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsgnjn_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -1633,7 +1633,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsgnjx_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -1642,7 +1642,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -1651,7 +1651,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsgnjx_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -1660,7 +1660,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -1669,7 +1669,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsgnjx_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -1678,7 +1678,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -1687,7 +1687,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsgnjx_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -1696,7 +1696,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -1705,7 +1705,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsgnjx_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -1714,7 +1714,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -1723,7 +1723,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsgnjx_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -1732,6 +1732,330 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, 
vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsgnjx_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfsgnj_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnj_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnj_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} + 
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c @@ -308,3 +308,57 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { return vfslide1down_vf_f16m8_m(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_vf_f32mf2_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_vf_f32mf2_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfslide1down_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_vf_f32mf2_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_vf_f32mf2_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_vf_f32mf2_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1down_vf_f32mf2_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c @@ -303,3 +303,57 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { return vfslide1up_vf_f16m8_m(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_vf_f32mf2_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_ta(vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_vf_f32mf2_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_vf_f32mf2_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_vf_f32mf2_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_vf_f32mf2_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, float value, size_t vl) { + return vfslide1up_vf_f32mf2_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c @@ -182,7 +182,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) { return vfsqrt_v_f16mf4(op1, vl); } @@ -191,7 +191,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) { return vfsqrt_v_f16mf2(op1, vl); } @@ -200,7 +200,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) { return vfsqrt_v_f16m1(op1, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) { return vfsqrt_v_f16m2(op1, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) { return vfsqrt_v_f16m4(op1, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) { return vfsqrt_v_f16m8(op1, vl); } @@ -236,7 +236,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsqrt_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { return vfsqrt_v_f16mf4_m(mask, maskedoff, op1, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsqrt_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { return vfsqrt_v_f16mf2_m(mask, maskedoff, op1, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsqrt_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { return vfsqrt_v_f16m1_m(mask, maskedoff, op1, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsqrt_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { return vfsqrt_v_f16m2_m(mask, maskedoff, op1, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsqrt_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { return vfsqrt_v_f16m4_m(mask, maskedoff, op1, vl); } @@ -281,6 +281,60 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsqrt_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { return vfsqrt_v_f16m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfsqrt_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsub_vv_f16mf4(op1, op2, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16mf4(op1, op2, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsub_vv_f16mf2(op1, op2, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16mf2(op1, op2, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsub_vv_f16m1(op1, op2, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m1(op1, op2, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsub_vv_f16m2(op1, op2, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m2(op1, op2, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsub_vv_f16m4(op1, op2, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m4(op1, op2, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsub_vv_f16m8(op1, op2, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m8(op1, op2, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfsub_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -488,7 +488,7 @@ 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfsub_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfsub_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfsub_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat16m2_t 
test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfsub_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return vfsub_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } @@ -578,6 +578,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vfsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_vf_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_ta( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_vf_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_vf_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_vf_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_vf_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsub_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfsub_vf_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsub_vf_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfsub_vf_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c @@ -331,7 +331,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfwadd_vv_f32mf2(op1, op2, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32mf2(op1, op2, vl); } @@ -349,7 +349,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { return vfwadd_wv_f32mf2(op1, op2, vl); } @@ -358,7 +358,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32mf2(op1, op2, vl); } @@ -367,7 +367,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfwadd_vv_f32m1(op1, op2, vl); } @@ -376,7 +376,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m1(op1, op2, vl); } @@ -385,7 +385,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { return vfwadd_wv_f32m1(op1, op2, vl); } @@ -394,7 +394,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m1(op1, op2, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfwadd_vv_f32m2(op1, op2, vl); } @@ -412,7 +412,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m2(op1, op2, vl); } @@ -421,7 +421,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { return vfwadd_wv_f32m2(op1, op2, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m2(op1, op2, vl); } @@ -439,7 +439,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfwadd_vv_f32m4(op1, op2, vl); } @@ -448,7 +448,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m4(op1, op2, vl); } @@ -457,7 +457,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { return vfwadd_wv_f32m4(op1, op2, vl); } @@ -466,7 +466,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m4(op1, op2, vl); } @@ -475,7 +475,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfwadd_vv_f32m8(op1, op2, vl); } @@ -484,7 +484,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m8(op1, op2, vl); } @@ -493,7 +493,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { return vfwadd_wv_f32m8(op1, op2, vl); } @@ -502,7 +502,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m8(op1, op2, vl); } @@ -511,7 +511,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfwadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -529,7 +529,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { return vfwadd_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -538,7 +538,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t 
test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -547,7 +547,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfwadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -556,7 +556,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -565,7 +565,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { return vfwadd_wv_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -574,7 +574,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -583,7 +583,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfwadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -592,7 +592,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -601,7 +601,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { return vfwadd_wv_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -619,7 +619,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfwadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -628,7 +628,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -637,7 +637,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { return vfwadd_wv_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -646,7 +646,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -655,7 +655,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { 
return vfwadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -664,7 +664,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfwadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -673,7 +673,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { return vfwadd_wv_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -682,6 +682,222 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { return vfwadd_wf_f32m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t 
merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_vv_f64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwadd_wv_f64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c @@ -738,7 +738,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) { return vfwcvt_f_x_v_f16mf4(src, vl); } @@ -747,7 +747,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) { return vfwcvt_f_x_v_f16mf2(src, vl); } @@ -756,7 +756,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) { +vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) { return vfwcvt_f_x_v_f16m1(src, vl); } @@ -765,7 +765,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) { +vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) { return vfwcvt_f_x_v_f16m2(src, vl); } @@ -774,7 +774,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) { +vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) { 
return vfwcvt_f_x_v_f16m4(src, vl); } @@ -783,7 +783,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) { +vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) { return vfwcvt_f_x_v_f16m8(src, vl); } @@ -792,7 +792,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf4(src, vl); } @@ -801,7 +801,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf2(src, vl); } @@ -810,7 +810,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) { +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m1(src, vl); } @@ -819,7 +819,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) { +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) { return vfwcvt_f_xu_v_f16m2(src, vl); } @@ -828,7 +828,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) { +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m4(src, vl); } @@ -837,7 +837,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) { +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) { return vfwcvt_f_xu_v_f16m8(src, vl); } @@ -846,7 +846,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { return vfwcvt_x_f_v_i32mf2(src, vl); } @@ -855,7 +855,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32mf2(src, vl); } @@ -864,7 +864,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { +vint32m1_t 
test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { return vfwcvt_x_f_v_i32m1(src, vl); } @@ -873,7 +873,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m1(src, vl); } @@ -882,7 +882,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { return vfwcvt_x_f_v_i32m2(src, vl); } @@ -891,7 +891,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m2(src, vl); } @@ -900,7 +900,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { return vfwcvt_x_f_v_i32m4(src, vl); } @@ -909,7 +909,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m4(src, vl); } @@ -918,7 +918,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { return vfwcvt_x_f_v_i32m8(src, vl); } @@ -927,7 +927,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m8(src, vl); } @@ -936,7 +936,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { return vfwcvt_xu_f_v_u32mf2(src, vl); } @@ -945,7 +945,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32mf2(src, vl); } @@ -954,7 +954,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m1(src, vl); } @@ -963,7 +963,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m1(src, vl); } @@ -972,7 +972,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { return vfwcvt_xu_f_v_u32m2(src, vl); } @@ -981,7 +981,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m2(src, vl); } @@ -990,7 +990,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m4(src, vl); } @@ -999,7 +999,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m4(src, vl); } @@ -1008,7 +1008,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { return vfwcvt_xu_f_v_u32m8(src, vl); } @@ -1017,7 +1017,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m8(src, vl); } @@ -1026,7 +1026,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) { return vfwcvt_f_f_v_f32mf2(src, vl); } @@ -1035,7 +1035,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) { +vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) { return vfwcvt_f_f_v_f32m1(src, vl); } @@ -1044,7 +1044,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) { +vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) { return vfwcvt_f_f_v_f32m2(src, vl); } @@ -1053,7 +1053,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) { +vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) { return vfwcvt_f_f_v_f32m4(src, vl); } @@ -1062,7 +1062,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) { +vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) { return vfwcvt_f_f_v_f32m8(src, vl); } @@ -1071,7 +1071,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { return vfwcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1080,7 +1080,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { return vfwcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); } @@ -1089,7 +1089,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { return vfwcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1098,7 +1098,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { return vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1107,7 +1107,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { return vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1116,7 +1116,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { return vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); } @@ -1125,7 +1125,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1134,7 +1134,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); } @@ -1143,7 +1143,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1152,7 +1152,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { return vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1161,7 +1161,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1170,7 +1170,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { return vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); } @@ -1179,7 +1179,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl); } @@ -1188,7 +1188,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl); } @@ -1197,7 +1197,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); } @@ -1206,7 +1206,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl); } @@ -1215,7 +1215,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); } @@ -1224,7 +1224,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl); } @@ -1233,7 +1233,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); } @@ -1242,7 +1242,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl); } @@ -1251,7 +1251,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); } @@ -1260,7 +1260,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl); } @@ -1269,7 +1269,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); } @@ -1278,7 +1278,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); } @@ -1287,7 +1287,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); } @@ -1296,7 +1296,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); } @@ -1305,7 +1305,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } @@ -1314,7 +1314,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } @@ -1323,7 +1323,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } @@ -1332,7 +1332,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } @@ -1341,7 +1341,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } @@ -1350,7 +1350,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } @@ -1359,7 +1359,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_f_f_v_f32mf2_m(mask, maskedoff, src, vl); } @@ -1368,7 +1368,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_f_f_v_f32m1_m(mask, maskedoff, src, vl); } @@ -1377,7 +1377,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl); } @@ -1386,7 +1386,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl); } @@ -1395,6 +1395,384 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl); } + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_ta(vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_ta(vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_ta(vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_ta(vfloat32mf2_t src, 
size_t vl) { + return vfwcvt_f_f_v_f64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t 
mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vfwcvt_rtz_x_f_v_i64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tama(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_x_f_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i64m1_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i64m1_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return 
vfwcvt_xu_f_v_u64m1_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m1_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m1_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c @@ -175,7 +175,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfwmacc_vv_f32mf2(vd, vs1, vs2, vl); } @@ -184,7 +184,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwmacc_vf_f32mf2(vd, vs1, vs2, vl); } @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmacc_vv_f32m1(vd, vs1, vs2, vl); } @@ -202,7 +202,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmacc_vf_f32m1(vd, vs1, vs2, vl); } @@ -211,7 +211,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwmacc_vv_f32m2(vd, vs1, vs2, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwmacc_vf_f32m2(vd, vs1, vs2, vl); } @@ -229,7 +229,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwmacc_vv_f32m4(vd, vs1, vs2, vl); } @@ -238,7 +238,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { return vfwmacc_vf_f32m4(vd, vs1, vs2, vl); } @@ -247,7 +247,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwmacc_vv_f32m8(vd, vs1, vs2, vl); } @@ -256,7 +256,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwmacc_vf_f32m8(vd, vs1, vs2, vl); } @@ -265,7 +265,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, 
vfloat16mf4_t vs2, size_t vl) { return vfwmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -274,7 +274,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -292,7 +292,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -301,7 +301,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -319,7 +319,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -328,7 +328,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t 
test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { return vfwmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -337,7 +337,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); } @@ -346,6 +346,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vv_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vf_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vv_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vf_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vv_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float 
[[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vf_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vv_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vf_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vv_f64m1_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmacc_vf_f64m1_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c @@ -175,7 +175,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t 
test_vfwmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfwmsac_vv_f32mf2(vd, vs1, vs2, vl); } @@ -184,7 +184,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwmsac_vf_f32mf2(vd, vs1, vs2, vl); } @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmsac_vv_f32m1(vd, vs1, vs2, vl); } @@ -202,7 +202,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmsac_vf_f32m1(vd, vs1, vs2, vl); } @@ -211,7 +211,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwmsac_vv_f32m2(vd, vs1, vs2, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwmsac_vf_f32m2(vd, vs1, vs2, vl); } @@ -229,7 +229,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwmsac_vv_f32m4(vd, vs1, vs2, vl); } @@ -238,7 +238,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { return vfwmsac_vf_f32m4(vd, vs1, vs2, vl); } @@ -247,7 +247,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwmsac_vv_f32m8(vd, vs1, vs2, vl); } @@ -256,7 +256,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwmsac_vf_f32m8(vd, vs1, vs2, vl); } @@ -265,7 +265,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfwmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -274,7 +274,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -292,7 +292,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -301,7 +301,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -319,7 +319,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -328,7 +328,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { return vfwmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -337,7 +337,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); } @@ -346,6 +346,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vv_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vf_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t 
test_vfwmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vv_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vf_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vv_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vf_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vv_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vf_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vv_f64m1_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwmsac_vf_f64m1_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c @@ -171,7 +171,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfwmul_vv_f32mf2(op1, op2, vl); } @@ -180,7 +180,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32mf2(op1, op2, vl); } @@ -189,7 +189,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfwmul_vv_f32m1(op1, op2, vl); } @@ -198,7 +198,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m1(op1, op2, vl); } @@ -207,7 +207,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfwmul_vv_f32m2(op1, op2, vl); } @@ -216,7 +216,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m2(op1, op2, vl); } @@ -225,7 +225,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfwmul_vv_f32m4(op1, op2, vl); } @@ -234,7 +234,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m4(op1, op2, vl); } @@ -243,7 +243,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfwmul_vv_f32m8(op1, op2, vl); } @@ -252,7 +252,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m8(op1, op2, vl); } @@ -261,7 +261,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfwmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -270,7 +270,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -279,7 +279,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfwmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -288,7 +288,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t 
test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -297,7 +297,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfwmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -306,7 +306,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -315,7 +315,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfwmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -324,7 +324,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -333,7 +333,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfwmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -342,6 +342,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfwmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_vv_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_vv_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_vv_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_vv_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_vv_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwmul_vv_f64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c @@ -175,7 +175,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmacc_vv_f32mf2(vd, vs1, vs2, vl); } @@ -184,7 +184,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmacc_vf_f32mf2(vd, vs1, vs2, vl); } @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwnmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmacc_vv_f32m1(vd, vs1, vs2, vl); } @@ -202,7 +202,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m1_t test_vfwnmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmacc_vf_f32m1(vd, vs1, vs2, vl); } @@ -211,7 +211,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmacc_vv_f32m2(vd, vs1, vs2, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmacc_vf_f32m2(vd, vs1, vs2, vl); } @@ -229,7 +229,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwnmacc_vv_f32m4(vd, vs1, vs2, vl); } @@ -238,7 +238,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { return vfwnmacc_vf_f32m4(vd, vs1, vs2, vl); } @@ -247,7 +247,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmacc_vv_f32m8(vd, vs1, vs2, vl); } @@ -256,7 +256,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmacc_vf_f32m8(vd, vs1, vs2, vl); } @@ -265,7 +265,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -274,7 +274,7 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwnmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -292,7 +292,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwnmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -301,7 +301,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -319,7 +319,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -328,7 +328,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, 
size_t vl) { return vfwnmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -337,7 +337,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); } @@ -346,6 +346,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vv_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vf_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vv_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vf_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vv_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vf_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vv_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vf_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vv_f64m1_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmacc_vf_f64m1_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c @@ -175,7 +175,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmsac_vv_f32mf2 (vfloat32mf2_t vd, 
vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmsac_vv_f32mf2(vd, vs1, vs2, vl); } @@ -184,7 +184,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmsac_vf_f32mf2(vd, vs1, vs2, vl); } @@ -193,7 +193,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwnmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmsac_vv_f32m1(vd, vs1, vs2, vl); } @@ -202,7 +202,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwnmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmsac_vf_f32m1(vd, vs1, vs2, vl); } @@ -211,7 +211,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmsac_vv_f32m2(vd, vs1, vs2, vl); } @@ -220,7 +220,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmsac_vf_f32m2(vd, vs1, vs2, vl); } @@ -229,7 +229,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwnmsac_vv_f32m4(vd, vs1, vs2, vl); } @@ -238,7 +238,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { return vfwnmsac_vf_f32m4(vd, vs1, vs2, vl); } @@ -247,7 +247,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmsac_vv_f32m8(vd, vs1, vs2, vl); } @@ -256,7 +256,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmsac_vf_f32m8(vd, vs1, vs2, vl); } @@ -265,7 +265,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -274,7 +274,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { return vfwnmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } @@ -283,7 +283,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwnmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -292,7 +292,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwnmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { return vfwnmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl); } @@ -301,7 +301,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -310,7 +310,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64( [[VD:%.*]], half [[VS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwnmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { return vfwnmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl); } @@ -319,7 +319,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { return vfwnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -328,7 +328,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwnmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { return vfwnmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl); } @@ -337,7 +337,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); } @@ -346,6 +346,114 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64( [[VD:%.*]], half [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwnmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { return vfwnmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl); } + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vv_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vf_f64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_ta(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vv_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_ta(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vf_f64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vv_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vf_f64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vv_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tama(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vf_f64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vv_f64m1_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[VD:%.*]], float [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { + return vfwnmsac_vf_f64m1_tamu(mask, vd, vs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c @@ -331,7 +331,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfwsub_vv_f32mf2(op1, op2, vl); } @@ -340,7 +340,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32mf2(op1, op2, vl); } @@ -349,7 +349,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { return vfwsub_wv_f32mf2(op1, op2, vl); } @@ -358,7 +358,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32mf2(op1, op2, vl); } @@ -367,7 +367,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfwsub_vv_f32m1(op1, op2, vl); } @@ -376,7 +376,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m1(op1, op2, vl); } @@ -385,7 +385,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { return vfwsub_wv_f32m1(op1, op2, vl); } @@ -394,7 +394,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m1(op1, op2, vl); } @@ -403,7 +403,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfwsub_vv_f32m2(op1, op2, vl); } @@ -412,7 +412,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m2(op1, op2, vl); } @@ -421,7 +421,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { return vfwsub_wv_f32m2(op1, op2, vl); } @@ -430,7 +430,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m2(op1, op2, vl); } @@ -439,7 +439,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfwsub_vv_f32m4(op1, op2, vl); } @@ -448,7 +448,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m4(op1, op2, vl); } @@ -457,7 +457,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { return vfwsub_wv_f32m4(op1, op2, vl); } @@ -466,7 +466,7 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m4(op1, op2, vl); } @@ -475,7 +475,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfwsub_vv_f32m8(op1, op2, vl); } @@ -484,7 +484,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m8(op1, op2, vl); } @@ -493,7 +493,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { return vfwsub_wv_f32m8(op1, op2, vl); } @@ -502,7 +502,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m8(op1, op2, vl); } @@ -511,7 +511,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { return vfwsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -520,7 +520,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -529,7 +529,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { return 
vfwsub_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -538,7 +538,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); } @@ -547,7 +547,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { return vfwsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -556,7 +556,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -565,7 +565,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { return vfwsub_wv_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -574,7 +574,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m1_m(mask, maskedoff, op1, op2, vl); } @@ -583,7 +583,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { return vfwsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -592,7 +592,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t 
test_vfwsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -601,7 +601,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { return vfwsub_wv_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -610,7 +610,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m2_m(mask, maskedoff, op1, op2, vl); } @@ -619,7 +619,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { return vfwsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -628,7 +628,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -637,7 +637,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { return vfwsub_wv_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -646,7 +646,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m4_m(mask, maskedoff, op1, op2, vl); } @@ -655,7 +655,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { return vfwsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -664,7 +664,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { return vfwsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -673,7 +673,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { return vfwsub_wv_f32m8_m(mask, maskedoff, op1, op2, vl); } @@ -682,6 +682,222 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { return vfwsub_wf_f32m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_ta(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_ta(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_ta(vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tuma(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t 
vl) { + return vfwsub_wf_f64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tama(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat64m1_t test_vfwsub_wf_f64m1_tama(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_vv_f64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { + return vfwsub_wv_f64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_tamu(vbool64_t mask, vfloat64m1_t merge, vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vmacc_vx_u64m8_m(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vv_i32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vx_i32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vv_u32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vx_u32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vv_i32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vx_i32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vv_u32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vx_u32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vv_i32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vx_i32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vv_u32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vx_u32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vv_i32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vx_i32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, 
size_t vl) { + return vmacc_vv_u32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vx_u32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vv_i32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmacc_vx_i32mf2_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vv_u32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmacc_vx_u32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vmadd_vx_u64m8_m(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vv_i32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vx_i32mf2_tu(vd, rs1, vs2, vl); 
+} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vv_u32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vx_u32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vv_i32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vx_i32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vv_u32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vx_u32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vv_i32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vx_i32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vv_u32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vx_u32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vv_i32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vx_i32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vv_u32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vx_u32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vv_i32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vmadd_vx_i32mf2_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vv_u32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vmadd_vx_u32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c @@ -1588,3 +1588,218 @@ return vmaxu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tama( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vminu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t 
vl) { + return vmin_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c @@ -3531,3 +3531,543 @@ vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return 
vmul_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_ta(op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_ta(vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { 
+ return vmulhu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c @@ -1188,3 +1188,219 @@ vuint64m8_t op1, size_t shift, size_t vl) { return vnclipu_wx_u32m4_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_wv_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_wv_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_wv_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_wv_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_wv_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_wv_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_wv_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_wv_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_wv_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_wv_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclip_wv_i32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: 
@test_vnclip_wx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnclipu_wv_u32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vnmsac_vx_u64m8_m(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vv_i32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vx_i32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vv_u32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vx_u32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vv_i32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vx_i32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vv_u32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vx_u32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vv_i32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vx_i32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vv_u32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vx_u32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vv_i32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vx_i32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vv_u32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vx_u32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vv_i32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsac_vx_i32mf2_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vv_u32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsac_vx_u32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { return vnmsub_vx_u64m8_m(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vv_i32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vx_i32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vv_u32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vx_u32mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_ta(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vv_i32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_ta(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vx_i32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vv_u32mf2_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_ta(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vx_u32mf2_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vv_i32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vx_i32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vv_u32mf2_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vx_u32mf2_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vv_i32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vx_i32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vv_u32mf2_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vx_u32mf2_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vv_i32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vnmsub_vx_i32mf2_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vv_u32mf2_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vnmsub_vx_u32mf2_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c @@ -543,3 +543,111 @@ vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { return vnsra_wx_i32m4_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_wv_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_ta(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_wv_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_ta(vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_wv_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_wv_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tama(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_wv_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tama(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsra_wv_i32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint64m1_t op1, size_t shift, size_t vl) { + return vnsra_wx_i32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c @@ -543,3 +543,111 @@ vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vnsrl_wx_u32m4_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_wv_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_ta(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_wv_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_ta(vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_wv_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( 
[[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_wv_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_wv_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tama(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { + return vnsrl_wv_u32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint64m1_t op1, size_t shift, size_t vl) { + return vnsrl_wx_u32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vor_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vor_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vremu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tuma( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return 
vrem_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c @@ -3533,3 +3533,489 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { 
return vrgatherei16_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_i32mf2_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32mf2_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_u32mf2_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32mf2_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_f32mf2_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f32mf2_tu(merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_i32mf2_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_ta(vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32mf2_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_u32mf2_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_ta(vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32mf2_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_ta(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_f32mf2_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_ta(vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f32mf2_ta(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_ta(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_ta(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_ta(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_i32mf2_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32mf2_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_u32mf2_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32mf2_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_f32mf2_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f32mf2_tuma(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint32mf2_t test_vrgatherei16_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_i32mf2_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32mf2_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_u32mf2_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32mf2_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_f32mf2_tumu(mask, merge, 
op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f32mf2_tumu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_i32mf2_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32mf2_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_u32mf2_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32mf2_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_f32mf2_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f32mf2_tama(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_i32mf2_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgather_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_i32mf2_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_u32mf2_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_u32mf2_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { + return vrgather_vv_f32mf2_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f32mf2_tamu(mask, merge, op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c @@ -795,3 +795,111 @@ vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t 
mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vrsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32mf2_tuma(mask, 
merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + 
return vsaddu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsaddu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c @@ -883,3 +883,75 @@ vbool8_t borrowin, size_t vl) { return vsbc_vxm_u64m8(op1, op2, borrowin, vl); } + +// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_vvm_i32mf2_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_vxm_i32mf2_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return 
vsbc_vvm_u32mf2_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_vxm_u32mf2_tu(merge, op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_vvm_i32mf2_ta(op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsbc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_vxm_i32mf2_ta(op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_vvm_u32mf2_ta(op1, op2, borrowin, vl); +} + +// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsbc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { + return vsbc_vxm_u32mf2_ta(op1, op2, borrowin, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c @@ -535,3 +535,57 @@ vint32m4_t op1, size_t vl) { return vsext_vf2_i64m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_i64m1_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_ta(vint32mf2_t op1, size_t vl) { + return vsext_vf2_i64m1_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tuma(vbool64_t mask, 
vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_i64m1_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_i64m1_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vsext_vf2_i64m1_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf2_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, size_t vl) { + return vsext_vf2_i64m1_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c @@ -903,3 +903,111 @@ size_t vl) { return vslide1down_vx_u64m8_m(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down_vx_i32mf2_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32mf2_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) { + return vslide1down_vx_i32mf2_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32mf2_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( 
[[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32mf2_tuma(mask, merge, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) {
+ return vslide1down_vx_u32mf2_tuma(mask, merge, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32mf2_tumu(mask, merge, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) {
+ return vslide1down_vx_u32mf2_tumu(mask, merge, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32mf2_tama(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) {
+ return vslide1down_vx_u32mf2_tama(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32mf2_tamu(mask, merge, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2_tamu(vbool64_t mask, 
vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1down_vx_u32mf2_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c @@ -878,3 +878,111 @@ size_t vl) { return vslide1up_vx_u64m8_m(mask, maskedoff, src, value, vl); } + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32mf2_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32mf2_tu(merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_ta(vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32mf2_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_ta(vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32mf2_ta(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32mf2_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32mf2_tuma(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32mf2_tumu(mask, merge, src, 
value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32mf2_tumu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32mf2_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32mf2_tama(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1up_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, int32_t value, size_t vl) { + return vslide1up_vx_i32mf2_tamu(mask, merge, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, uint32_t value, size_t vl) { + return vslide1up_vx_u32mf2_tamu(mask, merge, src, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c @@ -1218,3 +1218,165 @@ vfloat16m8_t test_vslidedown_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { return vslidedown_vx_f16m8_m(mask, dest, src, offset, vl); } + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vslidedown_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tamu(vbool64_t 
mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tamu(mask, merge, src, offset, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c @@ -1203,3 +1203,165 @@ vfloat16m8_t test_vslideup_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { return vslideup_vx_f16m8_m(mask, dest, src, offset, vl); } + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_i32mf2_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_u32mf2_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_f32mf2_tu(merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_i32mf2_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_u32mf2_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, 
size_t offset, size_t vl) { + return vslideup_vx_f32mf2_ta(dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_i32mf2_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_u32mf2_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_f32mf2_tuma(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_i32mf2_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_u32mf2_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_f32mf2_tumu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_i32mf2_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_u32mf2_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_f32mf2_tama(mask, dest, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslideup_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_i32mf2_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslideup_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_u32mf2_tamu(mask, merge, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslideup_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslideup_vx_f32mf2_tamu(mask, merge, src, offset, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vsll_vx_u64m8_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t 
merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c @@ -690,3 +690,111 @@ vint32m8_t op1, int32_t op2, size_t vl) { return vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, 
size_t vl) { + return vsmul_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsmul_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c @@ -795,3 +795,111 @@ vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { return vsra_vx_i64m8_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c @@ -796,3 +796,110 @@ return vsrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl); } +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vsrl_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { 
+ return vsrl_vv_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c @@ -845,3 +845,111 @@ vint64m8_t op1, size_t shift, size_t vl) { return vssra_vx_i64m8_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_vv_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_ta(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_vv_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_ta(vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_vv_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_vv_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_vv_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssra_vv_i32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c @@ -860,3 +860,111 @@ vuint64m8_t op1, size_t shift, size_t vl) { return vssrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_vv_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_tu(merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_vv_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_ta(vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_ta(op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_vv_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_tuma(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_vv_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_tumu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_vv_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_tama(mask, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vssrl_vv_u32mf2_tamu(mask, merge, op1, shift, vl); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_tamu(mask, merge, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c @@ -1702,3 +1702,219 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vssubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + 
return vssubu_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], 
i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vssubu_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c @@ -1587,3 +1587,219 @@ vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( 
[[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsub_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_tama(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c @@ -2319,3 +2319,435 @@ vuint64m8_t op1, uint32_t op2, size_t vl) { return vwaddu_wx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_ta(vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_ta(vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_u64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_ta(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_u64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_ta(vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, 
vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_u64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_u64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], 
i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_u64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_u64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_u64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_u64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_vv_u64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu_wv_u64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m1_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c @@ -2131,3 +2131,381 @@ vint32m4_t op2, size_t vl) { return vwmaccus_vx_i64m8_m(mask, acc, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vv_i64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vx_i64m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vv_u64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vx_u64m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vv_i64m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vx_i64m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_vx_i64m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vv_i64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vx_i64m1_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_ta(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vv_u64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_ta(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return 
vwmaccu_vx_u64m1_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_ta(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vv_i64m1_ta(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_ta(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vx_i64m1_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_ta(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_vx_i64m1_ta(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tuma(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vv_i64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tuma(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vx_i64m1_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vv_u64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vx_u64m1_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tuma(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, 
size_t vl) { + return vwmaccsu_vv_i64m1_tuma(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tuma(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vx_i64m1_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tuma(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_vx_i64m1_tuma(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tama(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vv_i64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tama(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vx_i64m1_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tama(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vv_u64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tama(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vx_u64m1_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tama(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vv_i64m1_tama(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tama(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vx_i64m1_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vwmaccus_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccus_vx_i64m1_tama(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_vx_i64m1_tama(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vv_i64m1_tamu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vv_i64m1_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmacc_vx_i64m1_tamu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmacc_vx_i64m1_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vv_u64m1_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmaccu_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccu_vx_u64m1_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vv_i64m1_tamu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vv_i64m1_tamu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmaccsu_vx_i64m1_tamu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return vwmaccsu_vx_i64m1_tamu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[VD:%.*]], i32 [[RS1:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vwmaccus_vx_i64m1_tamu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return vwmaccus_vx_i64m1_tamu(mask, vd, rs1, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c @@ -1623,3 +1623,327 @@ vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { return vwmulsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_vv_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_vv_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_vv_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_vv_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_ta(vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_vv_u64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_ta(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_vv_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_ta(vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_vv_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_vv_u64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vwmulu_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_vv_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m1_tuma(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_vv_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_vv_u64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_vv_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m1_tumu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_vv_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_vv_u64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tama(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_vv_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tama(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m1_tama(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwmul_vv_i64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwmul_vx_i64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulu_vv_u64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulu_vx_u64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwmulsu_vv_i64m1_tamu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vwmulsu_vx_i64m1_tamu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c @@ -2319,3 +2319,435 @@ vuint64m8_t op1, uint32_t op2, size_t vl) { return vwsubu_wx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_tu (vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_tu (vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_tu (vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_tu (vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_tu (vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_vv_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_tu (vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_tu (vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m1_tu(merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_ta (vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_ta (vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_ta (vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_i64m1_ta(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_ta( +// CHECK-RV64-NEXT: 
entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_ta (vint64m1_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1_ta (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_vv_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1_ta (vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1_ta (vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_wv_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1_ta (vuint64m1_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_vv_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_wv_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_tuma (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_vv_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_wv_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1_tuma (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_vv_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_wv_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_tumu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_vv_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_wv_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1_tumu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_tama (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_vv_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_tama (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_tama (vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_wv_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_tama (vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_vv_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1_tama (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1_tama (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_wv_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1_tama (vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_vv_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub_wv_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_tamu (vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_vv_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwsubu_wv_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1_tamu (vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx_u64m1_tamu(mask, merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
@@ -1587,3 +1587,219 @@ vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vxor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vxor_vv_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vxor_vx_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vxor_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vxor_vv_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vxor_vx_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vxor_vv_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vxor_vv_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vxor_vx_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vxor_vv_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vxor_vv_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vxor_vx_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vxor_vv_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vxor_vv_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vxor_vx_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vxor_vv_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vxor_vv_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vxor_vx_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vxor_vv_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
@@ -535,3 +535,57 @@
 vuint32m4_t op1, size_t vl) {
   return vzext_vf2_u64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_ta(vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tama(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tamu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1_tamu(mask, merge, op1, vl);
+}