diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -1282,27 +1282,28 @@ let Name = NAME, IRName = IR, MaskedIRName = IR # "_mask", + UnMaskedPolicyScheme = HasPassthruOperand, ManualCodegen = [{ { - // op1, vl - IntrinsicTypes = {ResultType, - cast<llvm::VectorType>(ResultType)->getElementType(), - Ops[1]->getType()}; - Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1])); - // insert undef passthru - Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + if (DefaultPolicy == TAIL_AGNOSTIC) + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); + Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy)); + // passthru, op1, op2, vl + IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()}; break; } }], MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); - Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); - // maskedoff, op1, mask, vl - IntrinsicTypes = {ResultType, - cast<llvm::VectorType>(ResultType)->getElementType(), - Ops[3]->getType()}; - Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[1])); + if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); + Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy)); + Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy)); + // maskedoff, op1, op2, mask, vl, policy + IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()}; break; } }] in { @@ -1314,29 +1315,34 @@ let Name = NAME, IRName = IR, MaskedIRName = IR # "_mask", + UnMaskedPolicyScheme = HasPassthruOperand, ManualCodegen = [{ { - // op1, vl + if (DefaultPolicy == TAIL_AGNOSTIC) + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); + Ops.insert(Ops.begin() + 2, + llvm::Constant::getAllOnesValue(ElemTy)); + // passthru, op1, op2, vl IntrinsicTypes = {ResultType, - cast<llvm::VectorType>(ResultType)->getElementType(), - Ops[1]->getType()}; - Ops.insert(Ops.begin() + 1, - llvm::Constant::getAllOnesValue(IntrinsicTypes[1])); - // insert undef passthru - Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + ElemTy, + Ops[3]->getType()}; break; } }], MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); - Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED)); - // maskedoff, op1, mask, vl - IntrinsicTypes = {ResultType, - cast<llvm::VectorType>(ResultType)->getElementType(), - Ops[3]->getType()}; + if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); Ops.insert(Ops.begin() + 2, - llvm::Constant::getAllOnesValue(IntrinsicTypes[1])); + llvm::Constant::getAllOnesValue(ElemTy)); + Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy)); + // maskedoff, op1, op2, mask, vl, policy + IntrinsicTypes = {ResultType, + ElemTy, + Ops[4]->getType()}; break; } }] in { @@ -1366,28 +1372,29 @@ let Name = NAME, IRName = IR, MaskedIRName = IR # "_mask", + UnMaskedPolicyScheme = HasPassthruOperand, ManualCodegen = [{ { - // op1, vl - IntrinsicTypes = {ResultType, - Ops[0]->getType(), Ops[1]->getType()}; - Ops.insert(Ops.begin() + 1, Ops[0]); if (DefaultPolicy == TAIL_AGNOSTIC)
Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + // op1, op2, vl + IntrinsicTypes = {ResultType, + Ops[1]->getType(), Ops[2]->getType()}; + Ops.insert(Ops.begin() + 2, Ops[1]); break; } }], MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); - Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy)); - // maskedoff, op1, mask, vl - IntrinsicTypes = {ResultType, - Ops[1]->getType(), - Ops[3]->getType()}; - Ops.insert(Ops.begin() + 2, Ops[1]); if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + Ops.insert(Ops.begin() + 2, Ops[1]); + Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy)); + // maskedoff, op1, op2, mask, vl, policy + IntrinsicTypes = {ResultType, + Ops[2]->getType(), + Ops.back()->getType()}; break; } }] in { @@ -1401,31 +1408,34 @@ OverloadedName = MName, IRName = IR, MaskedIRName = IR # "_mask", + UnMaskedPolicyScheme = HasPassthruOperand, ManualCodegen = [{ { - // op1, vl - IntrinsicTypes = {ResultType, - Ops[0]->getType(), - cast<llvm::VectorType>(Ops[0]->getType())->getElementType(), - Ops[1]->getType()}; - Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2])); if (DefaultPolicy == TAIL_AGNOSTIC) Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); + Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy)); + // passthru, op1, op2, vl + IntrinsicTypes = {ResultType, + Ops[1]->getType(), + ElemTy, + Ops[3]->getType()}; break; } }], MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); + if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType(); + Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy)); Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy)); - // maskedoff, op1, mask, vl + // maskedoff, op1, op2, mask, vl, policy IntrinsicTypes = {ResultType, Ops[1]->getType(), - cast<llvm::VectorType>(Ops[1]->getType())->getElementType(), - Ops[3]->getType()}; - Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2])); - if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) - Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + ElemTy, + Ops[4]->getType()}; break; } }] in { @@ -1441,31 +1451,32 @@ OverloadedName = MName, IRName = IR, MaskedIRName = IR # "_mask", + UnMaskedPolicyScheme = HasPassthruOperand, ManualCodegen = [{ { - // op1, vl - IntrinsicTypes = {ResultType, - Ops[0]->getType(), - Ops[1]->getType(), - Ops[1]->getType()}; - Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2])); if (DefaultPolicy == TAIL_AGNOSTIC) Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType())); + // passthru, op1, xlen, vl + IntrinsicTypes = {ResultType, + Ops[1]->getType(), + Ops[3]->getType(), + Ops[3]->getType()}; break; } }], MaskedManualCodegen = [{ { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1); + if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) + Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType())); Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy)); - // maskedoff, op1, mask, vl + // maskedoff, op1, xlen, mask, vl, policy IntrinsicTypes = {ResultType, Ops[1]->getType(), - Ops[3]->getType(), -
Ops[3]->getType()}; - Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2])); - if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) - Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)); + Ops[4]->getType(), + Ops[4]->getType()}; break; } }] in { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c @@ -167,3 +167,56 @@ return vfabs(mask, maskedoff, op1, vl); } +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfabs_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfabs_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c @@ -167,3 +167,56 @@ return vfneg(mask, maskedoff, op1, vl); } +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfneg_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfneg_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c @@ -544,3 +544,110 @@ return vncvt_x(mask, maskedoff, src, vl); } +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) { + return vncvt_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_ta(vuint32mf2_t src, size_t vl) { + return vncvt_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vncvt_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vncvt_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c @@ -401,3 +401,56 @@ return vneg(mask, maskedoff, op1, vl); } +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_ta(vint32mf2_t op1, size_t vl) { + return vneg_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vneg_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c @@ -796,3 +796,111 @@ vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { return vnot(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) { + return vnot_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_ta(vuint32mf2_t op1, size_t vl) { + return vnot_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vnot_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) { + return vnot_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_tamu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: 
@test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t 
src, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 
0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { @@ -492,7 +492,7 @@ // 
CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { @@ -537,10 +537,117 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { return vwcvtu_x(mask, maskedoff, src, vl); } +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) { + return vwcvt_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_ta(vuint32mf2_t src, size_t vl) { + return vwcvtu_x_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vwcvt_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tama( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c @@ -275,3 +275,57 @@ vfloat16m8_t test_vfabs_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { return vfabs_v_f16m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfabs_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c @@ -275,3 +275,57 @@ vfloat16m8_t test_vfneg_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { return vfneg_v_f16m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MERGE:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c @@ -544,3 +544,110 @@ return vncvt_x_x_w_u32m4_m(mask, maskedoff, src, vl); } +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_x_w_i16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_x_w_u16mf4_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) { + return vncvt_x_x_w_i16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_ta(vuint32mf2_t src, size_t vl) { + return vncvt_x_x_w_u16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_x_w_i16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_x_w_u16mf4_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_x_w_i16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_x_w_u16mf4_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tama( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vncvt_x_x_w_i16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vncvt_x_x_w_u16mf4_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) { + return vncvt_x_x_w_i16mf4_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) { + return vncvt_x_x_w_u16mf4_tamu(mask, merge, src, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c @@ -401,3 +401,56 @@ return vneg_v_i64m8_m(mask, maskedoff, op1, vl); } +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_v_i32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_ta(vint32mf2_t op1, size_t vl) { + return vneg_v_i32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_v_i32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_v_i32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vneg_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vneg_v_i32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vneg_v_i32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c @@ -796,3 +796,111 @@ vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { return vnot_v_u64m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_v_i32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_v_u32mf2_tu(merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) { + return vnot_v_i32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_ta(vuint32mf2_t op1, size_t vl) { + return vnot_v_u32mf2_ta(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_v_i32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_v_u32mf2_tuma(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_v_i32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_v_u32mf2_tumu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) { + return vnot_v_i32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) { + return vnot_v_u32mf2_tama(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) { + return vnot_v_i32mf2_tamu(mask, merge, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MERGE:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) { + return vnot_v_u32mf2_tamu(mask, merge, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: 
@test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ // 
CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 
(vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, 
vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_m 
(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { @@ -537,10 +537,117 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { return vwcvtu_x_x_v_u64m8_m(mask, maskedoff, src, vl); } +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_x_v_i64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m1_tu(merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) { + return vwcvt_x_x_v_i64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_ta(vuint32mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_x_v_i64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tuma(vbool64_t 
mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m1_tuma(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_x_v_i64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m1_tumu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) { + return vwcvt_x_x_v_i64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m1_tama(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwcvt_x_x_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) { + return vwcvt_x_x_v_i64m1_tamu(mask, merge, src, vl); +} + +// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) { + return vwcvtu_x_x_v_u64m1_tamu(mask, merge, src, vl); +}
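
A minimal usage sketch (not part of the patch) of the policy suffixes these tests exercise, assuming the intrinsic declarations in <riscv_vector.h> match the test signatures above: the unmasked _tu variants take a merge operand that becomes the passthru of the IR call, _ta passes undef instead, and the masked variants lower to the .mask. intrinsics with a trailing policy immediate of 0 (_tumu), 1 (_tamu), 2 (_tuma), or 3 (_tama), as the CHECK lines show. The wrapper names below are illustrative only; the intrinsic names and argument orders are the ones the tests above compile.

#include <riscv_vector.h>

// Tail-undisturbed: elements at index >= vl keep their values from dest
// instead of being left unspecified.
vint32mf2_t negate_keep_tail(vint32mf2_t dest, vint32mf2_t v, size_t vl) {
  return vneg_v_i32mf2_tu(dest, v, vl);
}

// Tail-undisturbed, mask-agnostic: tail elements come from dest, while
// elements whose mask bit is clear are not guaranteed any particular value.
vint32mf2_t negate_masked(vbool64_t m, vint32mf2_t dest, vint32mf2_t v,
                          size_t vl) {
  return vneg_v_i32mf2_tuma(m, dest, v, vl);
}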