diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -1745,7 +1745,8 @@ ["Uv", "UvUw"]]>; // 12.8. Vector Integer Comparison Instructions -let MaskedPolicyScheme = NonePolicy in { +let MaskedPolicyScheme = HasPassthruOperand, + HasTailPolicy = false in { defm vmseq : RVVIntMaskOutBuiltinSet; defm vmsne : RVVIntMaskOutBuiltinSet; defm vmsltu : RVVUnsignedMaskOutBuiltinSet; @@ -1950,7 +1951,8 @@ defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">; // 14.13. Vector Floating-Point Compare Instructions -let MaskedPolicyScheme = NonePolicy in { +let MaskedPolicyScheme = HasPassthruOperand, + HasTailPolicy = false in { defm vmfeq : RVVFloatingMaskOutBuiltinSet; defm vmfne : RVVFloatingMaskOutBuiltinSet; defm vmflt : RVVFloatingMaskOutBuiltinSet; diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c @@ -361,3 +361,39 @@ vfloat64m8_t op1, double op2, size_t vl) { return vmfeq(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfeq_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64_ma 
(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfeq_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfeq_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c @@ -329,3 +329,39 @@ return vmfge(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfge_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> 
[[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfge_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfge_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c @@ -329,3 +329,39 @@ return vmfgt(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfgt_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfgt_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c @@ -361,3 +361,39 @@ vfloat64m8_t op1, double op2, size_t vl) { return vmfle(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfle_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfle_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c @@ -361,3 +361,39 @@ vfloat64m8_t op1, double op2, size_t vl) { return vmflt(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmflt_ma(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmflt_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c @@ -361,3 +361,39 @@ vfloat64m8_t op1, double op2, size_t vl) { return vmfne(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, 
vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfne_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfne_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c @@ -1699,3 +1699,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmseq(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmseq_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmseq_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmseq_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmseq_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmseq_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmseq_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmseq_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmseq_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c @@ -1588,3 +1588,75 @@ return vmsgeu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsge_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmsge_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsge_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgeu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgeu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsge_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vx_i32mf2_b64_mu (vbool64_t mask, 
vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsge_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgeu_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgeu_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c @@ -1588,3 +1588,75 @@ return vmsgtu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsgt_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsgt_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgtu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgtu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsgt_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsgt_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgtu_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgtu_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c @@ -1714,3 +1714,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsleu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsle_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsle_ma(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsleu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsleu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsle_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsle_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool64_t test_vmsleu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsleu_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsleu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsleu_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c @@ -1714,3 +1714,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsltu(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmslt_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmslt_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsltu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsltu_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmslt_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmslt_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsltu_mu(mask, merge, op1, op2, vl); +} + 
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsltu_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c @@ -1699,3 +1699,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsne(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsne_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsne_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t 
op2, size_t vl) { + return vmsne_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsne_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsne_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsne_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsne_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsne_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c @@ -578,3 +578,39 @@ vbool2_t test_vmfeq_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfeq_vv_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfeq_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c @@ -545,3 +545,39 @@ vbool2_t test_vmfge_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfge_vv_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge_vf_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool64_t test_vmfge_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfge_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c @@ -545,3 +545,39 @@ vbool2_t test_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfgt_vv_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfgt_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c @@ -578,3 +578,39 @@ vbool2_t test_vmfle_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfle_vv_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64_ma 
(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle_vf_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfle_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c @@ -578,3 +578,39 @@ vbool2_t test_vmflt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmflt_vv_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_vf_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmflt_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c @@ -578,3 +578,39 @@ vbool2_t test_vmfne_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { return vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, 
vfloat32mf2_t op2, size_t vl) { + return vmfne_vv_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_vf_f32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfne_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MERGE:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c @@ -1699,3 +1699,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmseq_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmseq_vv_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmseq_vx_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmseq_vv_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmseq_vx_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmseq_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmseq_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmseq_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmseq_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmseq_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsge.c @@ -1588,3 +1588,75 @@ return vmsgeu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + 
return vmsge_vv_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsge_vx_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgeu_vv_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgeu_vx_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsge_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsge_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsge_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgeu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgeu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgeu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c @@ -1588,3 +1588,75 @@ return vmsgtu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsgt_vv_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsgt_vx_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgtu_vv_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgtu_vx_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsgt_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgt_vx_i32mf2_b64_mu (vbool64_t mask, 
vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsgt_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsgtu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsgtu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsgtu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c @@ -1714,3 +1714,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsleu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsle_vv_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsleu_vv_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsle_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsle_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return 
vmsle_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsleu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsleu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsleu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c @@ -1714,3 +1714,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsltu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmslt_vv_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmslt_vx_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsltu_vv_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsltu_vx_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmslt_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmslt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmslt_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmsltu_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsltu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsltu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsltu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c @@ -1699,3 +1699,75 @@ vuint64m8_t op1, uint64_t op2, size_t vl) { return vmsne_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsne_vv_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, 
vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsne_vx_i32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsne_vv_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsne_vx_u32mf2_b64_ma(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmsne_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsne_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmsne_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmsne_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsne_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl); +}