diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -173,20 +173,13 @@
   // This builtin has a granted vector length parameter.
   bit HasVL = true;
 
-  // There are several cases for specifying tail policy.
-  // 1. Add tail policy argument to masked intrinsics. It may have the maskedoff
-  //    argument or not.
-  //    * Have the maskedoff argument: (HasPolicy = true, HasMaskedOffOperand = true)
-  //      Ex: vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, ta);
-  //    * Do not have the maskedoff argument: (HasPolicy = true, HasMaskedOffOperand = false)
-  //      Ex: vmacc_vv_i8m1_mt(mask, vd, vs1, vs2, vl, ta);
-  // 2. Add dest argument for no mask intrinsics. (TODO)
-  //      Ex: vmv_v_x_i8m1_t(dest, src, vl);
-  // 3. Always tail agnostic. (HasPolicy = false)
-  //      Ex: vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
-  // The tail policy argument is located at the last position.
+  // The masked intrinsic IR has a policy operand.
+  // The policy argument is located at the last position.
   bit HasPolicy = true;
 
+  // The nomask intrinsic IR has a passthru operand.
+  bit HasNoMaskPassThru = false;
+
   // This builtin supports non-masked function overloading api.
   // All masked operations support overloading api.
   bit HasNoMaskedOverloaded = true;
@@ -1290,6 +1283,8 @@
                         cast<llvm::VectorType>(ResultType)->getElementType(),
                         Ops[1]->getType()};
       Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1]));
+      // insert undef passthru
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
       break;
     }
   }],
@@ -1321,6 +1316,8 @@
                         Ops[1]->getType()};
       Ops.insert(Ops.begin() + 1, llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
+      // insert undef passthru
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
       break;
     }
   }],
@@ -1369,6 +1366,8 @@
       IntrinsicTypes = {ResultType, Ops[0]->getType(),
                         Ops[1]->getType()};
       Ops.insert(Ops.begin() + 1, Ops[0]);
+      // insert undef passthru
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
       break;
     }
   }],
@@ -1402,6 +1401,8 @@
                         cast<llvm::VectorType>(Ops[0]->getType())->getElementType(),
                         Ops[1]->getType()};
       Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+      // insert undef passthru
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
       break;
     }
   }],
@@ -1438,6 +1439,8 @@
                         Ops[1]->getType(),
                         Ops[1]->getType()};
       Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+      // insert undef passthru
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
       break;
     }
   }],
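The net effect of the added Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType)) lines above shows up in the test updates below: every nomask builtin now passes an explicit undef passthru as the first operand of the emitted IR intrinsic. A minimal sketch of the before/after lowering for one builtin, mirroring the vadd tests in this patch; the vscale types are spelled out here based on the nxv8i8 mangling, and the wrapper name add_i8m1 is only for illustration:

#include <riscv_vector.h>

// The C-level builtin call is unchanged; only the IR it lowers to changes.
vint8m1_t add_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  // Before this patch the call lowered to:
  //   call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(
  //       <vscale x 8 x i8> %op1, <vscale x 8 x i8> %op2, i64 %vl)
  // With the new leading passthru operand (undef for the nomask form):
  //   call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(
  //       <vscale x 8 x i8> undef, <vscale x 8 x i8> %op1,
  //       <vscale x 8 x i8> %op2, i64 %vl)
  return vadd_vv_i8m1(op1, op2, vl);
}
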
@@ -1583,15 +1586,18 @@
 
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
+let HasNoMaskPassThru = true in {
 defm vadd : RVVIntBinBuiltinSet;
 defm vsub : RVVIntBinBuiltinSet;
 defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
                                  [["vx", "v", "vve"],
                                   ["vx", "Uv", "UvUvUe"]]>;
+}
 defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
 
 // 12.2. Vector Widening Integer Add/Subtract
 // Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
+let HasNoMaskPassThru = true in {
 defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
 defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
 // Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
@@ -1603,6 +1609,7 @@
 // Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
 defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
 defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
+}
 defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
                                           [["Uw", "UwUv"]]>;
 defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
@@ -1633,12 +1640,15 @@
 }
 
 // 12.5. Vector Bitwise Logical Instructions
+let HasNoMaskPassThru = true in {
 defm vand : RVVIntBinBuiltinSet;
 defm vxor : RVVIntBinBuiltinSet;
 defm vor : RVVIntBinBuiltinSet;
+}
 defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
 
 // 12.6. Vector Single-Width Bit Shift Instructions
+let HasNoMaskPassThru = true in {
 defm vsll : RVVShiftBuiltinSet;
 defm vsrl : RVVUnsignedShiftBuiltinSet;
 defm vsra : RVVSignedShiftBuiltinSet;
@@ -1646,6 +1656,7 @@
 // 12.7. Vector Narrowing Integer Right Shift Instructions
 defm vnsrl : RVVUnsignedNShiftBuiltinSet;
 defm vnsra : RVVSignedNShiftBuiltinSet;
+}
 defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
                                          [["v", "vw"], ["Uv", "UvUw"]]>;
 
@@ -1665,6 +1676,7 @@
 }
 
 // 12.9. Vector Integer Min/Max Instructions
+let HasNoMaskPassThru = true in {
 defm vminu : RVVUnsignedBinBuiltinSet;
 defm vmin : RVVSignedBinBuiltinSet;
 defm vmaxu : RVVUnsignedBinBuiltinSet;
@@ -1685,9 +1697,10 @@
 defm vdiv : RVVSignedBinBuiltinSet;
 defm vremu : RVVUnsignedBinBuiltinSet;
 defm vrem : RVVSignedBinBuiltinSet;
+}
 
 // 12.12. Vector Widening Integer Multiply Instructions
-let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+let Log2LMUL = [-3, -2, -1, 0, 1, 2], HasNoMaskPassThru = true in {
 defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
                                     [["vv", "w", "wvv"],
                                      ["vx", "w", "wve"]]>;
@@ -1751,6 +1764,7 @@
 
 // 13. Vector Fixed-Point Arithmetic Instructions
 // 13.1. Vector Single-Width Saturating Add and Subtract
+let HasNoMaskPassThru = true in {
 defm vsaddu : RVVUnsignedBinBuiltinSet;
 defm vsadd : RVVSignedBinBuiltinSet;
 defm vssubu : RVVUnsignedBinBuiltinSet;
@@ -1800,6 +1814,7 @@
                                   [["vv", "w", "wvv"],
                                    ["vf", "w", "wve"]]>;
 }
+}
 
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
 defm vfmacc : RVVFloatingTerBuiltinSet;
@@ -1827,6 +1842,7 @@
 def vfrec7 : RVVFloatingUnaryVVBuiltin;
 
 // 14.11. Vector Floating-Point MIN/MAX Instructions
+let HasNoMaskPassThru = true in {
 defm vfmin : RVVFloatingBinBuiltinSet;
 defm vfmax : RVVFloatingBinBuiltinSet;
 
@@ -1834,6 +1850,7 @@
 defm vfsgnj : RVVFloatingBinBuiltinSet;
 defm vfsgnjn : RVVFloatingBinBuiltinSet;
 defm vfsgnjx : RVVFloatingBinBuiltinSet;
+}
 defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
 defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
 
@@ -2005,6 +2022,7 @@
 defm vslidedown : RVVSlideBuiltinSet;
 
 // 17.3.3. Vector Slide1up Instructions
+let HasNoMaskPassThru = true in {
 defm vslide1up : RVVSlideOneBuiltinSet;
 defm vfslide1up : RVVFloatingBinVFBuiltinSet;
 
@@ -2027,6 +2045,7 @@
                                 [["vx", "Uv", "UvUvz"]]>;
 defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
                                      [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
+}
 
 // 17.5.
Vector Compress Instruction let HasMask = false, HasPolicy = false, diff --git a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c --- a/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c +++ b/clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c @@ -25,7 +25,7 @@ // CHECK-NEXT: [[TMP0:%.*]] = load , * [[OP0_ADDR]], align 1 // CHECK-NEXT: [[TMP1:%.*]] = load , * [[OP1_ADDR]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[VL_ADDR]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[TMP0]], [[TMP1]], i64 [[TMP2]]) +// CHECK-NEXT: [[TMP3:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[TMP0]], [[TMP1]], i64 [[TMP2]]) // CHECK-NEXT: store [[TMP3]], * [[RET]], align 1 // CHECK-NEXT: [[TMP4:%.*]] = load , * [[RET]], align 1 // CHECK-NEXT: ret [[TMP4]] diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: 
@test_vaadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, 
vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, 
uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], 
i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t 
vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: 
@test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -178,7 +178,7 @@ 
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, 
vint32mf2_t op2, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -574,7 +574,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -583,7 +583,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -592,7 +592,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -601,7 +601,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -610,7 +610,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -637,7 +637,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -646,7 +646,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -655,7 +655,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -664,7 +664,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -673,7 +673,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -682,7 +682,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -691,7 +691,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -709,7 +709,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -718,7 +718,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -727,7 +727,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -736,7 +736,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -745,7 +745,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -754,7 +754,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -763,7 +763,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -772,7 +772,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -781,7 +781,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -790,7 +790,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( 
undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], 
i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // 
CHECK-RV64-LABEL: @test_vand_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1(vuint64m1_t 
op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -51,7 +51,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -60,7 +60,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -69,7 +69,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -78,7 +78,7 @@
 
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t 
vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( undef, 
[[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( undef, 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vdiv.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, 
vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4 
(vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -98,7 
+98,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: 
@test_vfneg_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( 
[[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -226,7 +226,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -235,7 +235,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -245,7 +245,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -254,7 +254,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -264,7 +264,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -273,7 +273,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -292,7 +292,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -302,7 +302,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -311,7 +311,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -397,7 +397,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -406,7 +406,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -416,7 +416,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -425,7 +425,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -435,7 +435,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -444,7 +444,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -454,7 +454,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, 
size_t vl) { @@ -463,7 +463,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -473,7 +473,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -482,7 +482,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, @@ -17,7 
+17,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, @@ -47,7 +47,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, @@ -57,7 +57,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, @@ -67,7 +67,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, @@ -77,7 +77,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, @@ -47,7 +47,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, @@ -57,7 +57,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, @@ -67,7 +67,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double 
[[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, @@ -77,7 +77,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( 
undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -102,7 +102,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -112,7 +112,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -121,7 +121,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -131,7 +131,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -140,7 +140,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, 
int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ 
// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, 
int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, 
size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@
// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@
// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
//
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 
@@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t 
op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( undef, 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ 
-753,7 +753,7 @@
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -18,7 +18,7 @@
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -27,7 +27,7 @@
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -99,7 +99,7 @@ // 
CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t 
op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // 
CHECK-RV64-LABEL: @test_vmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // 
CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t 
op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -942,7 +942,7 @@ // 
CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ 
-1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: 
@test_vmulhsu_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t 
test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1590,7 +1590,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1608,7 +1608,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -1626,7 +1626,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1635,7 +1635,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -1644,7 +1644,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1653,7 +1653,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -1662,7 +1662,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1671,7 +1671,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { @@ -1680,7 +1680,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1698,7 +1698,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ // CHECK-RV64-LABEL: 
@test_vmulhsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -1716,7 +1716,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1725,7 +1725,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -1734,7 +1734,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1743,7 +1743,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { @@ -1752,7 +1752,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1761,7 +1761,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8( // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ 
-279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -317,7 +317,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, @@ -327,7 +327,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -345,7 +345,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -354,7 +354,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -363,7 +363,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -372,7 +372,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -381,7 +381,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -409,7 +409,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, @@ -514,7 +514,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -523,7 +523,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, @@ -533,7 +533,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -542,7 +542,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, @@ -552,7 +552,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( undef, 
[[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) { @@ 
-169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vneg_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -43,7 +43,7 @@ // 
CHECK-RV64-LABEL: @test_vnot_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, 
[[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( 
[[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ 
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ 
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 
@@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 
+87,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: 
@test_vor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // 
CHECK-RV64-LABEL: @test_vor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 
@@ // CHECK-RV64-LABEL: @test_vor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 
@@ // CHECK-RV64-LABEL: @test_vor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // 
CHECK-RV64-LABEL: @test_vor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // 
CHECK-RV64-LABEL: @test_vor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t 
op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: 
@test_vrem_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 
+366,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, 
uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( undef, 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 
+789,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, @@ -55,7 +55,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { @@ -64,7 +64,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { @@ -82,7 +82,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -91,7 +91,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { @@ -100,7 +100,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -127,7 +127,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { @@ 
-136,7 +136,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, @@ -146,7 +146,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, @@ -165,7 +165,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { @@ -174,7 +174,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, @@ -184,7 +184,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { @@ -193,7 +193,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, @@ -203,7 +203,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { @@ -212,7 +212,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { @@ -307,7 +307,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, @@ -317,7 +317,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { @@ -326,7 +326,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { @@ -345,7 +345,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, @@ -355,7 +355,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { @@ -364,7 +364,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, @@ -374,7 +374,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { @@ -383,7 +383,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, @@ -431,7 
+431,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, @@ -450,7 +450,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, @@ -469,7 +469,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -487,7 +487,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { @@ -496,7 +496,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -505,7 +505,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { @@ -514,7 +514,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -523,7 +523,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { @@ -532,7 +532,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -541,7 +541,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { @@ -550,7 +550,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, @@ -560,7 +560,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, @@ -570,7 +570,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, @@ -580,7 +580,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, @@ -590,7 +590,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { @@ -647,7 +647,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, @@ -686,7 +686,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, @@ -696,7 +696,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { @@ -705,7 +705,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, @@ -715,7 +715,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t 
index, size_t vl) { @@ -724,7 +724,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, @@ -734,7 +734,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { @@ -743,7 +743,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, @@ -772,7 +772,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { @@ -781,7 +781,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, @@ -791,7 +791,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 
[[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { @@ -800,7 +800,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, @@ -829,7 +829,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { @@ -838,7 +838,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, @@ -858,7 +858,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 
[[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, @@ -887,7 +887,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { @@ -896,7 +896,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, @@ -925,7 +925,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { @@ -934,7 +934,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, @@ -944,7 +944,7 @@ // CHECK-RV64-LABEL: 
@test_vrgather_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { @@ -953,7 +953,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, @@ -963,7 +963,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { @@ -972,7 +972,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, @@ -982,7 +982,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { @@ -991,7 +991,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, @@ -1001,7 +1001,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { @@ -1010,7 +1010,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, @@ -1020,7 +1020,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, @@ -1030,7 +1030,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, @@ -1040,7 +1040,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, @@ -1087,7 +1087,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, @@ -1097,7 +1097,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, @@ -1107,7 +1107,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, @@ -1117,7 +1117,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, @@ -1127,7 +1127,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, @@ -1137,7 +1137,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, @@ -1147,7 +1147,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, @@ -1157,7 +1157,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, @@ -1167,7 +1167,7 @@ // 
CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, @@ -1177,7 +1177,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, @@ -1187,7 +1187,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, @@ -1197,7 +1197,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, @@ -1207,7 +1207,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, @@ -1217,7 +1217,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, @@ -1227,7 +1227,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, @@ -1237,7 +1237,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, @@ -1247,7 +1247,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, @@ -1267,7 +1267,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, @@ -1277,7 +1277,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1287,7 +1287,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1297,7 +1297,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -1307,7 +1307,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -1317,7 +1317,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -1327,7 +1327,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1337,7 +1337,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, @@ -1357,7 +1357,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, @@ -1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, @@ -1377,7 +1377,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, @@ -1387,7 +1387,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, @@ -1397,7 +1397,7 @@ // 
CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, @@ -1407,7 +1407,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, @@ -1417,7 +1417,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, @@ -1427,7 +1427,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, @@ -1447,7 +1447,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, @@ -1457,7 +1457,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, @@ -1467,7 +1467,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, @@ -1477,7 +1477,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, @@ -1487,7 +1487,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, @@ -1497,7 +1497,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, @@ -1507,7 +1507,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], 
i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( 
[[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // 
CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 
[[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, @@ -217,7 +217,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, @@ -237,7 +237,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -246,7 +246,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -255,7 +255,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -264,7 +264,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -273,7 +273,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -343,7 +343,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -353,7 +353,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -363,7 +363,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -373,7 +373,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -383,7 +383,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t 
value, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -243,7 +243,7 @@ // 
CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -290,7 +290,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -300,7 +300,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -310,7 +310,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], 
i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -370,7 +370,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -380,7 +380,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -410,7 +410,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // 
CHECK-RV64-LABEL: @test_vsll_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( 
undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // 
CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ 
-42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t 
shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( 
[[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( undef, 
[[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ // 
CHECK-RV64-LABEL: @test_vssra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 
+33,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( 
undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 
+315,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // 
CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // 
CHECK-RV64-LABEL: @test_vssub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // 
CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vssubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: 
@test_vsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t 
op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // 
CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t 
op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ // CHECK-RV64-LABEL: 
@test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 
@@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // 
CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -654,7 +654,7 @@ // 
CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, 
size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t 
op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ 
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { 
@@ -1013,7 +1013,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t 
op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // 
CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, 
int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u64m4( // 
CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@
// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@
// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@
// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // 
CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 
+684,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: 
@test_vadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -241,7 +241,7 @@ // 
CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, 
size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -574,7 +574,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -583,7 +583,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -592,7 +592,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -601,7 +601,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -610,7 +610,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -637,7 +637,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -646,7 +646,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -655,7 +655,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -664,7 +664,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -673,7 +673,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -682,7 +682,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -691,7 +691,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -709,7 +709,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -718,7 +718,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -727,7 +727,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -736,7 +736,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( 
[[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -745,7 +745,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -754,7 +754,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -763,7 +763,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -772,7 +772,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -781,7 +781,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -790,7 +790,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // 
CHECK-RV64-LABEL: @test_vand_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vand_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vand_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, 
uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ 
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vasub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vasub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: 
@test_vasubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ 
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // 
CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, 
vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // 
CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfabs_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8 (vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { 
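All of the vfadd updates above follow the same pattern: the non-masked builtin still takes only its two sources and vl, and the generated IR call now carries a leading undef passthru operand of the result type. A minimal sketch of what one such test exercises (illustrative helper name; assumes the riscv_vector.h setup these test files already use):

#include <stddef.h>
#include <riscv_vector.h>

/* Non-masked vector-vector add: there is no passthru parameter in the C API.
 * Per the updated CHECK lines above, the expected lowering for the f32m1
 * variant is
 *   call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(
 *       <vscale x 2 x float> undef, <vscale x 2 x float> %op1,
 *       <vscale x 2 x float> %op2, i64 %vl)
 */
vfloat32m1_t add_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfadd_vv_f32m1(op1, op2, vl);
}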
@@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, 
_Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( undef, 
[[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ 
-56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( undef, 
[[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmin.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t 
vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { 
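The vector-scalar (vf) forms get the same treatment: the scalar source stays a plain _Float16/float/double argument, and only the leading undef passthru operand is new. A sketch in the same spirit (illustrative helper name; _Float16 as in the f16 tests above):

#include <stddef.h>
#include <riscv_vector.h>

/* Non-masked vector-scalar multiply. Per the updated CHECK lines above, the
 * expected lowering for the f16mf2 variant is
 *   call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(
 *       <vscale x 2 x half> undef, <vscale x 2 x half> %op1,
 *       half %op2, i64 %vl)
 */
vfloat16mf2_t mul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
  return vfmul_vf_f16mf2(op1, op2, vl);
}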
@@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfneg_v_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrdiv.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( 
[[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( 
[[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -217,7 +217,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -236,7 +236,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -246,7 +246,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -255,7 +255,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -265,7 +265,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -274,7 +274,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -284,7 +284,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -379,7 +379,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -388,7 +388,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -398,7 +398,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -407,7 +407,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -417,7 +417,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -426,7 +426,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, 
size_t vl) { @@ -445,7 +445,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -455,7 +455,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -464,7 +464,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( undef, [[OP1:%.*]], double 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -1090,7 +1090,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1099,7 +1099,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1108,7 +1108,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1126,7 +1126,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1135,7 +1135,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1144,7 +1144,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1153,7 +1153,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -1162,7 +1162,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1171,7 +1171,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1180,7 +1180,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1189,7 +1189,7 @@ // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -1198,7 +1198,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1216,7 +1216,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1225,7 +1225,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1243,7 +1243,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1252,7 +1252,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1261,7 +1261,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -1270,7 +1270,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1279,7 +1279,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -1306,7 +1306,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -1315,7 +1315,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -1324,7 +1324,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -1333,7 +1333,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -1342,7 +1342,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -1351,7 +1351,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1 
(vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -1360,7 +1360,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -1369,7 +1369,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -1378,7 +1378,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1396,7 +1396,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1405,7 +1405,7 @@ // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, @@ -203,7 +203,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) { @@ -212,7 +212,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) { @@ -221,7 +221,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) { @@ -230,7 +230,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) { @@ -239,7 +239,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) { @@ -248,7 +248,7 @@ // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c @@ -8,7 
+8,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( undef, [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1up.nxv4f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( undef, [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( undef, [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1up_vf_f16m8 
(vfloat16m8_t src, _Float16 value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( undef, [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t 
test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: 
@test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( 
[[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -337,7 +337,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { @@ -355,7 +355,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { @@ -364,7 +364,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -373,7 +373,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -382,7 +382,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { @@ -391,7 +391,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -409,7 +409,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { @@ -427,7 +427,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -445,7 +445,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -454,7 +454,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { @@ -463,7 +463,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { @@ -472,7 +472,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -481,7 +481,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -490,7 +490,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -75,7 +75,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -84,7 +84,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -103,7 +103,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -122,7 +122,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( undef, [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -337,7 +337,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { @@ -355,7 +355,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { @@ -364,7 +364,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -373,7 +373,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -382,7 +382,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { @@ -391,7 +391,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -409,7 +409,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { @@ -427,7 +427,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2 
(vfloat32m2_t op1, _Float16 op2, size_t vl) { @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -445,7 +445,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -454,7 +454,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { @@ -463,7 +463,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { @@ -472,7 +472,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -481,7 +481,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -490,7 +490,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( 
[[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( undef, [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1(vint8m1_t 
op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) 
{ @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vmax_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vmax_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // 
CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( 
undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 
@@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t 
op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vmin_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vmin_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t 
vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vminu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vminu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -36,7 +36,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -45,7 +45,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -63,7 +63,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -81,7 +81,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -90,7 +90,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // 
CHECK-RV64-LABEL: @test_vmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, 
size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, 
uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, 
size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vmul_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vmul_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -816,7 +816,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -825,7 +825,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -834,7 +834,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -843,7 +843,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -861,7 +861,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -870,7 +870,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], 
i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -924,7 +924,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -933,7 +933,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -942,7 +942,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -951,7 +951,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -960,7 +960,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -996,7 +996,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1005,7 +1005,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -1014,7 +1014,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1023,7 +1023,7 @@ // CHECK-RV64-LABEL: 
@test_vmulh_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -1032,7 +1032,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1095,7 +1095,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -1104,7 +1104,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1113,7 +1113,7 @@ // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1122,7 +1122,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1131,7 +1131,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1176,7 +1176,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1185,7 +1185,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1194,7 +1194,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1203,7 +1203,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1212,7 +1212,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1221,7 +1221,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1266,7 +1266,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1284,7 +1284,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1293,7 +1293,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1302,7 +1302,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1311,7 +1311,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1356,7 +1356,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1365,7 +1365,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1374,7 +1374,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1392,7 +1392,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1401,7 +1401,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1410,7 +1410,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1446,7 +1446,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1455,7 +1455,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1464,7 +1464,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1473,7 +1473,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1482,7 +1482,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1491,7 +1491,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1545,7 +1545,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -1554,7 +1554,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1563,7 +1563,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { @@ -1572,7 +1572,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1581,7 +1581,7 @@ // 
CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1590,7 +1590,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1608,7 +1608,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -1626,7 +1626,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1635,7 +1635,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -1644,7 +1644,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1653,7 +1653,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -1662,7 +1662,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1671,7 +1671,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { @@ -1680,7 +1680,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1698,7 +1698,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -1716,7 +1716,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1725,7 +1725,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -1734,7 +1734,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1743,7 +1743,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { @@ -1752,7 +1752,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1761,7 +1761,7 @@ // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t 
shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -317,7 +317,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, @@ -327,7 +327,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -336,7 +336,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -345,7 +345,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -354,7 +354,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -363,7 +363,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -372,7 +372,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -381,7 +381,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -409,7 +409,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: 
@test_vnclipu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, @@ -495,7 +495,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -504,7 +504,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, @@ -514,7 +514,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -523,7 +523,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, @@ -533,7 +533,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -542,7 +542,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, @@ -552,7 +552,7 @@ // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
@@ -159,7 +159,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
@@ -168,7 +168,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
@@ -177,7 +177,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
@@ -186,7 +186,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
@@ -195,7 +195,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
@@ -204,7 +204,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
@@ -213,7 +213,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
@@ -222,7 +222,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
@@ -231,7 +231,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
@@ -240,7 +240,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
@@ -249,7 +249,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
@@ -258,7 +258,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
@@ -267,7 +267,7 @@
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
@@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@
// CHECK-RV64-LABEL: @test_vnot_v_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { @@ 
-367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vnot_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 
+240,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // 
CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { 
@@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, 
vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // 
CHECK-RV64-LABEL: @test_vrem_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, 
size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vrem_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vrem_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: 
@test_vremu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vremu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vremu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], 
[[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { @@ -27,7 +27,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { @@ -46,7 +46,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, @@ -56,7 +56,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { @@ -65,7 +65,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { @@ -83,7 +83,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -92,7 +92,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { @@ -101,7 +101,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -110,7 +110,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { @@ -137,7 +137,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, @@ -147,7 +147,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, 
size_t index, size_t vl) { @@ -156,7 +156,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, @@ -166,7 +166,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { @@ -175,7 +175,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, @@ -185,7 +185,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { @@ -194,7 +194,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 
[[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, @@ -299,7 +299,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, @@ -318,7 +318,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { @@ -327,7 +327,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, @@ -337,7 +337,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, @@ -356,7 +356,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { @@ -365,7 +365,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, @@ -432,7 +432,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { @@ -441,7 +441,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t 
index, @@ -451,7 +451,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { @@ -460,7 +460,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, @@ -470,7 +470,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { @@ -479,7 +479,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -488,7 +488,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { @@ -497,7 +497,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -506,7 +506,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { @@ -515,7 +515,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -524,7 +524,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { @@ -533,7 +533,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -542,7 +542,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { @@ -551,7 +551,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, @@ -561,7 +561,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, @@ -571,7 +571,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, @@ -581,7 +581,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, @@ -601,7 +601,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { @@ -610,7 +610,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { @@ -667,7 +667,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, @@ -677,7 +677,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, @@ -687,7 +687,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, @@ -697,7 +697,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { @@ -706,7 +706,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, @@ -716,7 +716,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { @@ -725,7 +725,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, @@ -754,7 +754,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { @@ -763,7 +763,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, @@ -773,7 +773,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { @@ -801,7 +801,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, @@ -811,7 +811,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.nxv4i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { @@ -820,7 +820,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, @@ -830,7 +830,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { @@ -839,7 +839,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, @@ -849,7 +849,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, @@ -869,7 +869,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { @@ -878,7 +878,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { @@ -897,7 +897,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, @@ -907,7 +907,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { @@ -916,7 +916,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, @@ -926,7 +926,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { @@ -935,7 +935,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, @@ -945,7 +945,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { @@ -954,7 +954,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t 
index, @@ -964,7 +964,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { @@ -973,7 +973,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, @@ -983,7 +983,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { @@ -992,7 +992,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, @@ -1002,7 +1002,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { @@ -1011,7 +1011,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, @@ -1021,7 +1021,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, @@ -1031,7 +1031,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, @@ -1041,7 +1041,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { @@ -1050,7 +1050,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, @@ -1078,7 +1078,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, @@ -1088,7 +1088,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, @@ -1108,7 +1108,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, @@ -1118,7 +1118,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, @@ -1128,7 +1128,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, @@ -1138,7 +1138,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, @@ -1148,7 +1148,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, @@ -1168,7 +1168,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, @@ -1178,7 +1178,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, 
vuint16mf4_t op2, @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, @@ -1198,7 +1198,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, @@ -1208,7 +1208,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, @@ -1218,7 +1218,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, @@ -1228,7 +1228,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, @@ -1238,7 +1238,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, @@ -1258,7 +1258,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, @@ -1268,7 +1268,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1298,7 +1298,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -1308,7 +1308,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -1318,7 +1318,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -1328,7 +1328,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1348,7 +1348,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, @@ -1358,7 +1358,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, @@ -1378,7 +1378,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, @@ -1388,7 +1388,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, @@ -1398,7 +1398,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, @@ -1408,7 +1408,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t 
op1, vuint16m1_t op2, @@ -1418,7 +1418,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, @@ -1428,7 +1428,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, @@ -1438,7 +1438,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, @@ -1448,7 +1448,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, @@ -1468,7 +1468,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, @@ -1478,7 +1478,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, @@ -1488,7 +1488,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv2f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, @@ -1498,7 +1498,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, @@ -1508,7 +1508,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, @@ -3212,7 +3212,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { @@ -3221,7 +3221,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t vl) { @@ -3230,7 +3230,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { @@ -3239,7 +3239,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t vl) { @@ -3248,7 +3248,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t vl) { @@ -3257,7 +3257,7 @@ // CHECK-RV64-LABEL: 
@test_vrgather_vx_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) { @@ -3266,7 +3266,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t vl) { @@ -3275,7 +3275,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) { @@ -3284,7 +3284,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t vl) { @@ -3293,7 +3293,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) { @@ -3302,7 +3302,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( undef, [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t vl) { @@ -3311,7 +3311,7 @@ // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( undef, [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) { @@ -3320,7 +3320,7 @@ // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv1f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -3329,7 +3329,7 @@
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -3338,7 +3338,7 @@
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -3347,7 +3347,7 @@
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -3356,7 +3356,7 @@
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -3365,7 +3365,7 @@
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat16m8_t test_vrgatherei16_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -15,7 +15,7 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: 
@test_vrsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -384,7 +384,7 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -393,7 +393,7 @@
// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -33,7 +33,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -42,7 +42,7 @@
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // 
CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 
+684,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -765,7 +765,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -774,7 +774,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -783,7 +783,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -792,7 +792,7 @@ // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t 
src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, @@ -217,7 +217,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, @@ -237,7 +237,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -246,7 +246,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -255,7 +255,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -264,7 +264,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -273,7 +273,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -283,7 +283,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -293,7 +293,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -343,7 +343,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -353,7 +353,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -363,7 +363,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -373,7 +373,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -383,7 +383,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( 
[[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -98,7 +98,7 @@ // 
CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -135,7 +135,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -144,7 +144,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -153,7 +153,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -162,7 +162,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -171,7 +171,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -180,7 +180,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -207,7 +207,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { @@ -216,7 +216,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { @@ -234,7 +234,7 @@ // CHECK-RV64-LABEL: 
@test_vslide1up_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -243,7 +243,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( undef, [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -290,7 +290,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -300,7 +300,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 
[[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -310,7 +310,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( undef, [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -370,7 +370,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( undef, [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -380,7 +380,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( 
[[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -400,7 +400,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -410,7 +410,7 @@ // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( undef, [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( 
undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 
+321,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -672,7 +672,7 @@ 
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vsll_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vsll_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: 
@test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: 
@test_vsmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 
[[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ 
-249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( undef, 
[[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // 
CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t 
shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ 
// CHECK-RV64-LABEL: @test_vssra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssra.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vssra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vssra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, @@ -252,7 +252,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -261,7 +261,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -270,7 +270,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -297,7 +297,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -306,7 +306,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -315,7 +315,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -342,7 +342,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -351,7 +351,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -396,7 +396,7 @@ // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t 
vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ 
-123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // 
CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -657,7 +657,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -666,7 +666,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -675,7 +675,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vssubu.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -693,7 +693,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -756,7 +756,7 @@ // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -765,7 +765,7 @@
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -774,7 +774,7 @@
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -783,7 +783,7 @@
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -792,7 +792,7 @@
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -24,7 +24,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // 
CHECK-RV64-LABEL: @test_vsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t 
op1, uint64_t op2, size_t vl) {
@@ -744,7 +744,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -753,7 +753,7 @@
// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -762,7 +762,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -771,7 +771,7 @@
// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -780,7 +780,7 @@
// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -789,7 +789,7 @@
// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
@@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -15,7 +15,7 @@
// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ 
// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t 
vl) { @@ -922,7 +922,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: 
@test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[SRC:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[SRC:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[SRC:%.*]], i32 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: 
@test_vwmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( 
undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( undef, 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( 
undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: 
@test_vwmulsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ // CHECK-RV64-LABEL: 
@test_vwsubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -312,7 +312,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -564,7 +564,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -573,7 +573,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -591,7 +591,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -600,7 +600,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -636,7 +636,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -645,7 +645,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -672,7 +672,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -681,7 +681,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -690,7 +690,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: 
@test_vxor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -717,7 +717,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -726,7 +726,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -735,7 +735,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -744,7 +744,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -753,7 +753,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -762,7 +762,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -771,7 +771,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t 
op2, size_t vl) { @@ -780,7 +780,7 @@ // CHECK-RV64-LABEL: @test_vxor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vxor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -162,6 +162,7 @@ bool IsMask; bool HasVL; bool HasPolicy; + bool HasNoMaskPassThru; bool HasNoMaskedOverloaded; bool HasAutoDef; // There is automiatic definition in header std::string ManualCodegen; @@ -177,8 +178,8 @@ RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, StringRef MangledSuffix, StringRef IRName, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, - bool HasNoMaskedOverloaded, bool HasAutoDef, - StringRef ManualCodegen, const RVVTypes &Types, + bool HasNoMaskPassThru, bool HasNoMaskedOverloaded, + bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector &IntrinsicTypes, const std::vector &RequiredFeatures, unsigned NF); ~RVVIntrinsic() = default; @@ -188,6 +189,7 @@ StringRef getMangledName() const { return MangledName; } bool hasVL() const { return HasVL; } bool hasPolicy() const { return HasPolicy; } + bool hasNoMaskPassThru() const { return HasNoMaskPassThru; } bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; } bool hasManualCodegen() const { return !ManualCodegen.empty(); } bool hasAutoDef() const { return HasAutoDef; } @@ -770,12 +772,14 @@ StringRef NewMangledName, StringRef MangledSuffix, StringRef IRName, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, - bool HasNoMaskedOverloaded, bool HasAutoDef, - StringRef ManualCodegen, const RVVTypes &OutInTypes, + bool HasNoMaskPassThru, bool HasNoMaskedOverloaded, + bool HasAutoDef, StringRef ManualCodegen, + const RVVTypes &OutInTypes, const std::vector &NewIntrinsicTypes, const std::vector &RequiredFeatures, unsigned NF) : IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy), + HasNoMaskPassThru(HasNoMaskPassThru), HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) { @@ -823,7 +827,7 @@ // IntrinsicTypes is nonmasked version index. Need to update it // if there is maskedoff operand (It is always in first operand). 
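The CHECK updates above show what this means for the unmasked builtins: for a record with HasNoMaskPassThru, the generated codegen (the hasNoMaskPassThru() branch just below) pushes an undef value of the result type and rotates it to the front of the operand list. A minimal sketch of the resulting IR for the i32m1 case, with the scalable vector types written out and illustrative value names:

; Unmasked vxor builtin after this patch: the first operand is an undef
; passthru of the result type; the remaining operands are unchanged.
%r = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(
         <vscale x 2 x i32> undef,   ; passthru; undef keeps the old tail-agnostic lowering
         <vscale x 2 x i32> %op1,
         <vscale x 2 x i32> %op2,
         i64 %vl)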
IntrinsicTypes = NewIntrinsicTypes; - if (IsMask && HasMaskedOffOperand) { + if ((IsMask && HasMaskedOffOperand) || (!IsMask && HasNoMaskPassThru)) { for (auto &I : IntrinsicTypes) { if (I >= 0) I += NF; @@ -860,6 +864,9 @@ } else { OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n"; } + } else if (hasNoMaskPassThru()) { + OS << " Ops.push_back(llvm::UndefValue::get(ResultType));\n"; + OS << " std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n"; } OS << " IntrinsicTypes = {"; @@ -1107,6 +1114,8 @@ PrintFatalError("Builtin with same name has different HasPolicy"); else if (P.first->second->hasPolicy() != Def->hasPolicy()) PrintFatalError("Builtin with same name has different HasPolicy"); + else if (P.first->second->hasNoMaskPassThru() != Def->hasNoMaskPassThru()) + PrintFatalError("Builtin with same name has different HasNoMaskPassThru"); else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes()) PrintFatalError("Builtin with same name has different IntrinsicTypes"); } @@ -1154,6 +1163,7 @@ bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand"); bool HasVL = R->getValueAsBit("HasVL"); bool HasPolicy = R->getValueAsBit("HasPolicy"); + bool HasNoMaskPassThru = R->getValueAsBit("HasNoMaskPassThru"); bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded"); std::vector Log2LMULList = R->getValueAsListOfInts("Log2LMUL"); StringRef ManualCodegen = R->getValueAsString("ManualCodegen"); @@ -1228,8 +1238,8 @@ Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRName, /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy, - HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(), - IntrinsicTypes, RequiredFeatures, NF)); + HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, + Types.getValue(), IntrinsicTypes, RequiredFeatures, NF)); if (HasMask) { // Create a mask intrinsic Optional MaskTypes = @@ -1237,8 +1247,9 @@ Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy, - HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, - MaskTypes.getValue(), IntrinsicTypes, RequiredFeatures, NF)); + HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef, + ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes, + RequiredFeatures, NF)); } } // end for Log2LMULList } // end for TypeRange diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -383,12 +383,13 @@ let VLOperand = 2; } // For destination vector type is the same as first and second source vector. - // Input: (vector_in, int_vector_in, vl) + // Input: (passthru, vector_in, int_vector_in, vl) class RISCVRGatherVVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. 
// Input: (vector_in, vector_in, int_vector_in, vl, ta) @@ -400,13 +401,14 @@ [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 4; } - // Input: (vector_in, int16_vector_in, vl) + // Input: (passthru, vector_in, int16_vector_in, vl) class RISCVRGatherEI16VVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, int16_vector_in, vl, ta) @@ -421,12 +423,13 @@ } // For destination vector type is the same as first source vector, and the // second operand is XLen. - // Input: (vector_in, xlen_in, vl) + // Input: (passthru, vector_in, xlen_in, vl) class RISCVGatherVXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, + LLVMMatchType<1>], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Second operand is XLen. @@ -440,13 +443,14 @@ let VLOperand = 4; } // For destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -461,12 +465,13 @@ } // For destination vector type is the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. @@ -480,13 +485,14 @@ let VLOperand = 4; } // For destination vector type is NOT the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABXNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -501,12 +507,13 @@ } // For destination vector type is NOT the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. 
- // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. @@ -595,13 +602,14 @@ } // For Saturating binary operations. // The destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -618,12 +626,13 @@ // For Saturating binary operations. // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -640,12 +649,13 @@ // For Saturating binary operations. // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is NOT the same as first source vector (with mask). diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -155,9 +155,9 @@ // and the fifth the VL. VSLIDEUP_VL, VSLIDEDOWN_VL, - // Matches the semantics of vslide1up/slide1down. The first operand is the - // source vector, the second is the XLenVT scalar value. The third and fourth - // operands are the mask and VL operands. + // Matches the semantics of vslide1up/slide1down. The first operand is + // passthru operand, the second is source vector, third is the XLenVT scalar + // value. The fourth and fifth operands are the mask and VL operands. 
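The same passthru-first layout applies at the intrinsic level: the unmasked slide1 intrinsics now take (passthru, vector_in, xlen scalar, vl), matching the passthru-first node operands described in the comment above (the node additionally carries a mask). A hedged sketch, with the scalable types reconstructed from the nxv1i64 tests below and the unmasked intrinsic name inferred from its masked counterpart:

; Unmasked vslide1down after this patch: passthru first, then the source
; vector, the XLen scalar and the VL (shown for RV64, so XLen is i64).
%r = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
         <vscale x 1 x i64> undef,   ; passthru
         <vscale x 1 x i64> %src,    ; source vector
         i64 %scalar,                ; XLenVT scalar
         i64 %vl)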
VSLIDE1UP_VL, VSLIDE1DOWN_VL, // Matches the semantics of the vid.v instruction, with a mask and VL diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -4475,10 +4475,12 @@ ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero, InsertI64VL); // First slide in the hi value, then the lo in underneath it. - ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, - ValHi, I32Mask, InsertI64VL); - ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, - ValLo, I32Mask, InsertI64VL); + ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, + DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi, + I32Mask, InsertI64VL); + ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, + DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo, + I32Mask, InsertI64VL); // Bitcast back to the right container type. ValInVec = DAG.getBitcast(ContainerVT, ValInVec); } @@ -4774,8 +4776,7 @@ // We need to special case these when the scalar is larger than XLen. unsigned NumOps = Op.getNumOperands(); bool IsMasked = NumOps == 7; - unsigned OpOffset = IsMasked ? 1 : 0; - SDValue Scalar = Op.getOperand(2 + OpOffset); + SDValue Scalar = Op.getOperand(3); if (Scalar.getValueType().bitsLE(XLenVT)) break; @@ -4790,7 +4791,7 @@ // Convert the vector source to the equivalent nxvXi32 vector. MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2); - SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset)); + SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(2)); SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, DAG.getConstant(0, DL, XLenVT)); @@ -4807,17 +4808,34 @@ // Shift the two scalar parts in using SEW=32 slide1up/slide1down // instructions. - if (IntNo == Intrinsic::riscv_vslide1up || - IntNo == Intrinsic::riscv_vslide1up_mask) { - Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi, - I32Mask, I32VL); - Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo, - I32Mask, I32VL); + SDValue Passthru = DAG.getBitcast(I32VT, Op.getOperand(1)); + if (!IsMasked) { + if (IntNo == Intrinsic::riscv_vslide1up) { + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec, + ScalarHi, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec, + ScalarLo, I32Mask, I32VL); + } else { + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec, + ScalarLo, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec, + ScalarHi, I32Mask, I32VL); + } } else { - Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo, - I32Mask, I32VL); - Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi, - I32Mask, I32VL); + // TODO Those VSLIDE1 could be TAMA because we use vmerge to select + // maskedoff + SDValue Undef = DAG.getUNDEF(I32VT); + if (IntNo == Intrinsic::riscv_vslide1up_mask) { + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec, + ScalarHi, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec, + ScalarLo, I32Mask, I32VL); + } else { + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec, + ScalarLo, I32Mask, I32VL); + Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec, + ScalarHi, I32Mask, I32VL); + } } // Convert back to nxvXi64. 
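On RV32 an i64 scalar cannot be passed in a single register, so the lowering above doubles the VL, bitcasts the vectors to nxvXi32 and feeds the two 32-bit halves through a pair of SEW=32 slides; for the masked form the result is then merged back according to the policy operand. A sketch of the tail-undisturbed masked case (policy 0), matching the CHECK lines of the new masked-vslide1down-rv32.ll test added further down:

; RV32, masked i64 vslide1down with a tail-undisturbed policy:
%a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
         <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> %src,
         i64 %scalar, <vscale x 1 x i1> %mask, i32 %vl, i32 0)
; expected codegen, per the test below (v8 = maskedoff, v9 = src,
; a0/a1 = scalar halves, a2 = vl, v0 = mask):
;   slli    a3, a2, 1                 ; double the VL for the SEW=32 slides
;   vsetvli zero, a3, e32, m1, ta, mu
;   vslide1down.vx v9, v9, a0         ; low half of the scalar
;   vslide1down.vx v9, v9, a1         ; high half of the scalar
;   vsetvli zero, a2, e64, m1, tu, mu
;   vmerge.vvm v8, v8, v9, v0         ; re-select maskedoff, tail undisturbed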
@@ -4825,11 +4843,21 @@ if (!IsMasked) return Vec; - // Apply mask after the operation. SDValue Mask = Op.getOperand(NumOps - 3); SDValue MaskedOff = Op.getOperand(1); - return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL); + // Assume Policy operand is the last operand. + uint64_t Policy = Op.getConstantOperandVal(NumOps - 1); + // We don't need to select maskedoff if it's undef. + if (MaskedOff.isUndef()) + return Vec; + // TAMU + if (Policy == RISCVII::TAIL_AGNOSTIC) + return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, + VL); + // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma. + // It's fine because vmerge does not care mask policy. + return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff, VL); } } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -996,6 +996,24 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + // Special version of VPseudoBinaryNoMask where we pretend the first source is // tied to the destination. // This allows maskedoff and rs2 to be the same register. @@ -1017,6 +1035,25 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoTiedBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, + Op2Class:$rs1, + AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasMergeOp = 0; // Merge is also rs2. 
+ let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoIStoreNoMask LMUL, bit Ordered>: Pseudo<(outs), @@ -1652,6 +1689,8 @@ let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoBinaryNoMask; + def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy, RISCVMaskedPseudo; @@ -1681,6 +1720,8 @@ let VLMul = lmul.value in { def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask; + def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU; def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy; } @@ -1693,6 +1734,8 @@ let VLMul = MInfo.value in { def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask; + def "_" # MInfo.MX # "_TIED_TU": VPseudoTiedBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask; } @@ -2820,14 +2863,14 @@ (mask_type VR:$rs2), GPR:$vl, sew)>; -class VPatBinaryNoMask : +class VPatBinaryM : Pat<(result_type (!cast(intrinsic_name) (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), @@ -2837,6 +2880,44 @@ (op2_type op2_kind:$rs2), GPR:$vl, sew)>; +class VPatBinaryNoMaskTA : + Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + +class VPatBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TU") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + // Same as above but source operands are swapped. class VPatBinaryNoMaskSwapped : Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), (result_type result_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), @@ -2938,6 +3020,23 @@ (op2_type op2_kind:$rs2), GPR:$vl, sew)>; +class VPatTiedBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TIED_TU") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + class VPatTiedBinaryMask; } -multiclass VPatBinary { - def : VPatBinaryNoMask; + def : VPatBinaryM; def : VPatBinaryMask; @@ -3176,8 +3275,10 @@ VReg op1_reg_class, DAGOperand op2_kind> { - def : VPatBinaryNoMask; + def : VPatBinaryNoMaskTA; + def : VPatBinaryNoMaskTU; def : VPatBinaryMaskTA; @@ -3349,9 +3450,9 @@ multiclass VPatBinaryM_MM { foreach mti = AllMasks in - def : VPatBinaryNoMask; + def : VPatBinaryM; } multiclass VPatBinaryW_VV; - let AddedComplexity = 1 in + def : VPatBinaryNoMaskTU; + let AddedComplexity = 1 in { + def : VPatTiedBinaryNoMaskTU; def : VPatTiedBinaryMask; + } def : VPatBinaryMaskTA vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinarySwappedM_VV vtilist> { foreach vti = vtilist in { defvar kind = "V"#vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryM; } } multiclass VPatBinaryM_VI vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinaryV_VV_VX_VI("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge), + (vti.Vector vti.RegClass:$rs2), + (vti.Vector vti.RegClass:$rs1), + VLOpFrag)), + 
(!cast("PseudoVSUB_VV_"#vti.LMul.MX#"_TU") + vti.RegClass:$merge, + vti.RegClass:$rs1, + vti.RegClass:$rs2, + GPR:$vl, + vti.Log2SEW)>; def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge), (vti.Vector vti.RegClass:$rs2), (vti.Vector vti.RegClass:$rs1), @@ -4098,7 +4217,8 @@ (XLenVT timm:$policy))>; // Match VSUB with a small immediate to vadd.vi by negating the immediate. - def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)), + (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), VLOpFrag)), (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1, @@ -4684,7 +4804,8 @@ foreach vti = AllIntegerVectors in { // Emit shift by 1 as an add since it might be faster. - def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), (XLenVT 1), VLOpFrag)), (!cast("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1, vti.RegClass:$rs1, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -1643,9 +1643,10 @@ SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT> ]>; -def SDTRVVSlide1 : SDTypeProfile<1, 4, [ - SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisInt<0>, SDTCisVT<2, XLenVT>, - SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT> +def SDTRVVSlide1 : SDTypeProfile<1, 5, [ + SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>, + SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, + SDTCisVT<5, XLenVT> ]>; def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>; @@ -1660,16 +1661,30 @@ VLOpFrag)), (!cast("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>; - def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX) vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; - def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd), + (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask true_mask), VLOpFrag)), + (!cast("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU") + vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), + GPR:$rs2, (vti.Mask true_mask), + VLOpFrag)), (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX) vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd), + (vti.Vector vti.RegClass:$rs1), + GPR:$rs2, (vti.Mask true_mask), + VLOpFrag)), + (!cast("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU") + vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>; } foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in { diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -23,6 +23,7 @@ } 
declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , , , i64); @@ -54,6 +55,7 @@ %len = load i64, i64* %local %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( + undef, %v1, %v2, i64 %len) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll @@ -0,0 +1,115 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ +; RUN: < %s | FileCheck %s + +declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( + , + , + i64, + , + i32, + i32); + +define @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 0) + + ret %a +} + +define @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 1) + + ret %a +} + + +; Fallback vslide1 to mask undisturbed until InsertVSETVLI supports mask agnostic. +define @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a3, a2, 1 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v9, v9, a0 +; CHECK-NEXT: vslide1down.vx v9, v9, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4, i32 2) + + ret %a +} + +; Fallback vslide1 to mask undisturbed until InsertVSETVLI supports mask agnostic. 
+define @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a2, a2, 1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: vslide1down.vx v8, v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + undef, + %0, + i64 %1, + %2, + i32 %3, i32 3) + + ret %a +} + +define @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a2, a2, 1 +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: vslide1down.vx v8, v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + undef, + %0, + i64 %1, + undef, + i32 %2, i32 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -79,11 +79,11 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret { - %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i32 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i32 %gvl) %call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0)) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %x, i32 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i32 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i32 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i32 %gvl) declare i32 @puts(i8*); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -79,11 +79,11 @@ ; SPILL-O2-NEXT: addi sp, sp, 32 ; SPILL-O2-NEXT: ret { - %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %gvl) %call = call signext i32 @puts(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0)) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %x, i64 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i64 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i64 %gvl) declare i32 @puts(i8*); diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ -; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32 -; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ -; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64 +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefix=RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: 
-verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefix=RV64 declare @llvm.riscv.vle.nxv1i8( , @@ -118,3 +118,2088 @@ ret %a } + +declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vaadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vaadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vaadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vaaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vaaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} +declare @llvm.riscv.vand.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vand.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vand.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vand.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vasub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vasub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vasub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vasub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vasubu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vasubu.vv v8, v9, v10 +; 
RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vasubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vasubu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vdiv.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vdiv.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vdiv.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vdivu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vdivu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vdivu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfdiv.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfdiv.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmax.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmax.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define 
@intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmin.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmin.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrdiv.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrdiv.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnjn.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnjn.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsgnjx.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # 
%entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsgnjx.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfrsub.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfrsub.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfslide1down.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfslide1down.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv1f16.f16( + , + , + half, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfslide1up.vf v8, v9, fa0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfslide1up.vf v8, v9, fa0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv1f16.f16( + %0, + %1, + half %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwsub.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwsub.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( + , + , 
+ , + iXLen); + +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vl4re16.v v24, (a0) +; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV32-NEXT: vfwsub.wv v8, v16, v24 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vl4re16.v v24, (a0) +; RV64-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV64-NEXT: vfwsub.wv v8, v16, v24 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwadd.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwadd.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfwadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfwadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( + , + , + , + iXLen); + +define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV32-NEXT: vfsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; RV64-NEXT: vfsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv1f16.nxv1f16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + + +declare @llvm.riscv.vslide1down.nxv1i64( + , + , + i64, + iXLen); + +define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: slli a2, a2, 1 
+; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vmv1r.v v10, v8 +; RV32-NEXT: vslide1down.vx v10, v9, a0 +; RV32-NEXT: vslide1down.vx v8, v10, a1 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vslide1down.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1down.nxv1i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: slli a2, a2, 1 +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vmv1r.v v10, v8 +; RV32-NEXT: vslide1up.vx v10, v9, a1 +; RV32-NEXT: vslide1up.vx v8, v10, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vslide1up.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vslide1up.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmax.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmax.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmax.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmax.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmaxu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmaxu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmin.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmin.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmin.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmin.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vminu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vminu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: 
vminu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vminu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmul.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulh.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulh.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulh.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulhsu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulhsu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vmulhu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vmulhu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnclip.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnclip.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, 
e8, mf8, tu, mu +; RV32-NEXT: vnclipu.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnclipu.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnsra.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnsra.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vnsrl.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vnsrl.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vor.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vor.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vor.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vor.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrem.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrem.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrem.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i8.nxv1i8( + , + , + , + iXLen); +declare @llvm.riscv.vrgather.vv.nxv1i8.i32( + , + , + , + iXLen); + +define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrgather.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrgather.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv1i8.i32( + %0, + %1, + %2, + iXLen %3) + 
+ ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv1i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV32-NEXT: vrgather.vx v8, v9, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vrgather.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv1i8( + %0, + %1, + iXLen %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrgatherei16.vv.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vrgatherei16.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vrgatherei16.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrgatherei16.vv.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsub.vv v8, v10, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vrsub.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrsub.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsadd.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsadd.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsadd.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call 
@llvm.riscv.vsaddu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsll.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsll.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsll.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsll.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsmul.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsmul.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsmul.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsmul.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsmul.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsmul.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsra.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsra.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsra.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsra.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} +declare @llvm.riscv.vsrl.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsrl.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsrl.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsrl.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssra.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: 
intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssra.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssra.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssra.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssrl.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssrl.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssrl.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vssubu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vssubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssubu.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vssub.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vssub.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssub.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i64.i64( + , + , + i64, + iXLen); + +define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; 
RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vssubu.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vssubu.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vssubu.nxv1i64.i64( + %0, + %1, + i64 %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vsub.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwadd.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwadd.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwadd.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwadd.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwaddu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwaddu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwmul.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwmul.vv v8, v9, v10 +; RV64-NEXT: ret 
+entry: + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwmulu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwmulu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwmulsu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwmulsu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsub.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsub.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsub.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsub.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied( %0, %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsub.wv v8, v8, v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsub.wv v8, v8, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + %0, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; 
RV32-NEXT: vwsubu.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsubu.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vwsubu.wv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vwsubu.wv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vxor.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vxor.vv v8, v9, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV64-NEXT: vxor.vv v8, v9, v10 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vxor.nxv1i8.nxv1i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vaadd.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vaadd.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vaadd.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vaadd.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vaadd.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vaadd.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ 
-1221,6 +1275,7 @@ } declare @llvm.riscv.vaadd.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vaadd.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vaadd.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vaadd.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vaadd.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vaadd.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vaadd.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vaadd.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vaadd.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vaadd.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vaadd.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vaadd.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vaadd.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vaadd.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vaadd.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vaadd.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vaadd.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll @@ -2,6 +2,7 @@ 
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vaadd.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vaadd.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vaadd.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vaadd.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vaadd.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vaadd.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vaadd.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vaadd.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vaadd.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vaadd.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vaadd.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vaadd.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vaadd.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vaadd.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vaadd.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vaadd.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vaadd.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vaadd.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ 
-815,6 +851,7 @@ } declare @llvm.riscv.vaadd.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vaadd.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vaadd.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vaadd.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vaadd.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vaadd.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vaadd.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vaadd.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vaadd.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vaadd.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vaadd.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vaadd.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vaadd.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vaadd.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vaadd.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vaadd.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vaadd.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vaadd.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vaadd.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...the same two additions are repeated for every remaining vaaddu variant in this file, including the i64 vector-scalar forms...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[...the same two additions are repeated for every remaining vaaddu vector-vector and vector-scalar variant in this file...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
@@ -3,6 +3,7 @@
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i64);
@@ -15,6 +16,7 @@
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 %2)
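Every test update in these files applies the same change: each unmasked declare gains a leading passthru parameter of the result type, and each call site passes undef for it. As a minimal self-contained sketch of the post-patch unmasked form, assuming the nxv8i8 vadd intrinsic from vadd-policy.ll above (the wrapper function and value names are illustrative, not taken from the patch; the vector types follow from the intrinsic name):

; Unmasked vadd after this patch: operands are (passthru, op1, op2, vl).
declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i64)

define <vscale x 8 x i8> @vadd_vv_nxv8i8_sketch(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %op2, i64 %vl) nounwind {
entry:
  ; The updated tests pass undef as the passthru, i.e. no merge value is
  ; requested, which matches the behaviour of the old three-operand form.
  %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %op1,
    <vscale x 8 x i8> %op2,
    i64 %vl)
  ret <vscale x 8 x i8> %a
}

Compiled with the llc invocation used by these tests (llc -mtriple=riscv64 -mattr=+v), this should still select a single vadd.vv, which is consistent with the diffs touching only the IR and not the CHECK lines.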
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...the same two additions are repeated for every remaining vadd variant in this file: the vector-vector forms, the .i8/.i16/.i32/.i64 vector-scalar forms, and the immediate forms that pass constants such as i8 9...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[...the same two additions are repeated for every remaining vadd variant in this file: the vector-vector forms, the .i8/.i16/.i32/.i64 vector-scalar forms, and the immediate forms that pass constants such as i8 9 or i64 9...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[...the same two additions are repeated for every remaining vand variant in this file, including the i64 vector-scalar forms and the immediate forms that pass constants such as i8 9...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... each remaining unmasked vand declaration in this file gains the same leading <vscale x N x iM> passthru parameter, and each unmasked call passes "<vscale x N x iM> undef" for it: vector-vector, vector-scalar and vector-immediate forms, nxv1i8 through nxv8i64 ...]
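Every hunk in these test updates follows one pattern: the unmasked llvm.riscv.* intrinsic gains a leading passthru operand of the result type, and each existing caller passes undef for it. A minimal stand-alone sketch of the resulting IR, modeled on the vand vector-vector form above (the function name @vand_vv_nxv1i8 and its CHECK-free body are illustrative only, not copied from the test file):

; Reduced example (assumed shape): the first operand is the new passthru.
declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru (new in this patch)
  <vscale x 1 x i8>,   ; op1
  <vscale x 1 x i8>,   ; op2
  i64)                 ; vl

define <vscale x 1 x i8> @vand_vv_nxv1i8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl) {
entry:
  ; Passing undef for the passthru keeps the behaviour the unmasked tests had
  ; before this change.
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
      <vscale x 1 x i8> undef,
      <vscale x 1 x i8> %op1,
      <vscale x 1 x i8> %op2,
      i64 %vl)
  ret <vscale x 1 x i8> %a
}

Running a function like this through llc -mtriple=riscv64 -mattr=+v should still select a single vand.vv, which is what the unchanged CHECK lines in these files expect.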
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical passthru additions for the remaining unmasked vasub declarations and calls in this file: vector-vector and vector-scalar forms, nxv1i8 through nxv8i64, i32 vl ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical passthru additions for the remaining unmasked vasub declarations and calls in this file: vector-vector and vector-scalar forms, nxv1i8 through nxv8i64, i64 vl ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical passthru additions for the remaining unmasked vasubu declarations and calls in this file: vector-vector and vector-scalar forms, nxv1i8 through nxv8i64, i32 vl ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical passthru additions for the remaining unmasked vasubu declarations and calls in this file: vector-vector and vector-scalar forms, nxv1i8 through nxv8i64, i64 vl ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical passthru additions for the remaining unmasked vdiv declarations and calls in this file: vector-vector and vector-scalar forms, nxv1i8 through nxv8i64, i32 vl ...]
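The vector-scalar (vx) variants exercised throughout these files follow the same convention; only the second source operand becomes a scalar. A reduced sketch in the same spirit, again with an illustrative function name, here for the vdiv scalar form that the riscv64 file below tests:

; Reduced example (assumed shape) for the vector-scalar form.
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
  <vscale x 1 x i8>,   ; passthru (new in this patch)
  <vscale x 1 x i8>,   ; vector operand
  i8,                  ; scalar operand
  i64)                 ; vl

define <vscale x 1 x i8> @vdiv_vx_nxv1i8(<vscale x 1 x i8> %op1, i8 %op2, i64 %vl) {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
      <vscale x 1 x i8> undef,
      <vscale x 1 x i8> %op1,
      i8 %op2,
      i64 %vl)
  ret <vscale x 1 x i8> %a
}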
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical passthru additions for the remaining unmasked vdiv declarations and calls in this file: vector-vector and vector-scalar forms, nxv1i8 through nxv8i64, i64 vl ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[... identical passthru additions for the remaining unmasked vdivu declarations and calls in this file: vector-vector and vector-scalar forms, nxv1i8 through nxv8i64, i32 vl ...]
@llvm.riscv.vdivu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vdivu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vdivu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vdivu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vdivu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vdivu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vdivu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vdivu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vdivu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vdivu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vdivu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vdivu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vdivu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i8.nxv8i8( + undef, %0, 
%1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare 
@llvm.riscv.vdivu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vdivu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vdivu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vdivu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vdivu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vdivu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vdivu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vdivu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vdivu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vdivu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vdivu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vdivu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vdivu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vdivu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vdivu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vdivu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vdivu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vdivu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vdivu.nxv1i64.i64( + , , 
i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vdivu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vdivu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vdivu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vdivu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vfadd.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ } declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ } declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ } declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ } declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfadd.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ } declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ } declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ } declare @llvm.riscv.vfadd.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -727,6 +759,7 @@ } declare @llvm.riscv.vfadd.nxv2f16.f16( + , , half, iXLen); @@ -739,6 +772,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -772,6 +806,7 @@ } declare @llvm.riscv.vfadd.nxv4f16.f16( + , , half, iXLen); @@ -784,6 +819,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -817,6 +853,7 @@ } declare @llvm.riscv.vfadd.nxv8f16.f16( + , , half, iXLen); @@ -829,6 +866,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -862,6 +900,7 @@ } declare @llvm.riscv.vfadd.nxv16f16.f16( + , , half, iXLen); @@ -874,6 +913,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -907,6 +947,7 @@ } declare @llvm.riscv.vfadd.nxv32f16.f16( + , , half, iXLen); @@ -919,6 +960,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -952,6 +994,7 @@ } declare @llvm.riscv.vfadd.nxv1f32.f32( + , , float, iXLen); @@ -964,6 +1007,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -997,6 +1041,7 @@ } declare @llvm.riscv.vfadd.nxv2f32.f32( + , , float, iXLen); @@ -1009,6 +1054,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1042,6 +1088,7 @@ } declare @llvm.riscv.vfadd.nxv4f32.f32( + , , float, iXLen); @@ -1054,6 +1101,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1087,6 +1135,7 @@ } declare @llvm.riscv.vfadd.nxv8f32.f32( + , , float, iXLen); @@ -1099,6 +1148,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1132,6 +1182,7 @@ } declare @llvm.riscv.vfadd.nxv16f32.f32( + , , float, iXLen); @@ -1144,6 +1195,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1177,6 +1229,7 @@ } declare @llvm.riscv.vfadd.nxv1f64.f64( + , , double, iXLen); @@ -1189,6 +1242,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1222,6 +1276,7 @@ } declare @llvm.riscv.vfadd.nxv2f64.f64( + , , double, iXLen); @@ -1234,6 +1289,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1267,6 +1323,7 @@ } declare @llvm.riscv.vfadd.nxv4f64.f64( + , , double, iXLen); @@ -1279,6 +1336,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1312,6 +1370,7 @@ } declare @llvm.riscv.vfadd.nxv8f64.f64( + , , double, iXLen); @@ -1324,6 +1383,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfdiv.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfdiv.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfdiv.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfdiv.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfdiv.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vfdiv.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vfdiv.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vfdiv.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ } declare @llvm.riscv.vfdiv.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ } declare @llvm.riscv.vfdiv.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ } declare @llvm.riscv.vfdiv.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ } declare @llvm.riscv.vfdiv.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ } declare @llvm.riscv.vfdiv.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ } declare @llvm.riscv.vfdiv.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ } declare @llvm.riscv.vfdiv.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -727,6 +759,7 @@ } declare @llvm.riscv.vfdiv.nxv2f16.f16( + , , half, iXLen); @@ -739,6 
+772,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -772,6 +806,7 @@ } declare @llvm.riscv.vfdiv.nxv4f16.f16( + , , half, iXLen); @@ -784,6 +819,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -817,6 +853,7 @@ } declare @llvm.riscv.vfdiv.nxv8f16.f16( + , , half, iXLen); @@ -829,6 +866,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -862,6 +900,7 @@ } declare @llvm.riscv.vfdiv.nxv16f16.f16( + , , half, iXLen); @@ -874,6 +913,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -907,6 +947,7 @@ } declare @llvm.riscv.vfdiv.nxv32f16.f16( + , , half, iXLen); @@ -919,6 +960,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -952,6 +994,7 @@ } declare @llvm.riscv.vfdiv.nxv1f32.f32( + , , float, iXLen); @@ -964,6 +1007,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -997,6 +1041,7 @@ } declare @llvm.riscv.vfdiv.nxv2f32.f32( + , , float, iXLen); @@ -1009,6 +1054,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1042,6 +1088,7 @@ } declare @llvm.riscv.vfdiv.nxv4f32.f32( + , , float, iXLen); @@ -1054,6 +1101,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1087,6 +1135,7 @@ } declare @llvm.riscv.vfdiv.nxv8f32.f32( + , , float, iXLen); @@ -1099,6 +1148,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1132,6 +1182,7 @@ } declare @llvm.riscv.vfdiv.nxv16f32.f32( + , , float, iXLen); @@ -1144,6 +1195,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1177,6 +1229,7 @@ } declare @llvm.riscv.vfdiv.nxv1f64.f64( + , , double, iXLen); @@ -1189,6 +1242,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1222,6 +1276,7 @@ } declare @llvm.riscv.vfdiv.nxv2f64.f64( + , , double, iXLen); @@ -1234,6 +1289,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1267,6 +1323,7 @@ } declare @llvm.riscv.vfdiv.nxv4f64.f64( + , , double, iXLen); @@ -1279,6 +1336,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1312,6 +1370,7 @@ } declare @llvm.riscv.vfdiv.nxv8f64.f64( + , , double, iXLen); @@ -1324,6 +1383,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfmax.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfmax.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfmax.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfmax.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfmax.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vfmax.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vfmax.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vfmax.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ } declare @llvm.riscv.vfmax.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ } declare @llvm.riscv.vfmax.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ } declare @llvm.riscv.vfmax.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ } declare @llvm.riscv.vfmax.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ } declare @llvm.riscv.vfmax.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ } declare @llvm.riscv.vfmax.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ } declare @llvm.riscv.vfmax.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -727,6 +759,7 @@ } declare @llvm.riscv.vfmax.nxv2f16.f16( + , , half, iXLen); @@ -739,6 +772,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -772,6 +806,7 @@ } declare @llvm.riscv.vfmax.nxv4f16.f16( + , , half, iXLen); @@ -784,6 +819,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -817,6 +853,7 @@ } declare @llvm.riscv.vfmax.nxv8f16.f16( + , , half, iXLen); @@ -829,6 +866,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -862,6 +900,7 @@ } declare @llvm.riscv.vfmax.nxv16f16.f16( + , , half, iXLen); @@ -874,6 +913,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -907,6 +947,7 @@ } declare @llvm.riscv.vfmax.nxv32f16.f16( + , , half, 
iXLen); @@ -919,6 +960,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -952,6 +994,7 @@ } declare @llvm.riscv.vfmax.nxv1f32.f32( + , , float, iXLen); @@ -964,6 +1007,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -997,6 +1041,7 @@ } declare @llvm.riscv.vfmax.nxv2f32.f32( + , , float, iXLen); @@ -1009,6 +1054,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1042,6 +1088,7 @@ } declare @llvm.riscv.vfmax.nxv4f32.f32( + , , float, iXLen); @@ -1054,6 +1101,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1087,6 +1135,7 @@ } declare @llvm.riscv.vfmax.nxv8f32.f32( + , , float, iXLen); @@ -1099,6 +1148,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1132,6 +1182,7 @@ } declare @llvm.riscv.vfmax.nxv16f32.f32( + , , float, iXLen); @@ -1144,6 +1195,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1177,6 +1229,7 @@ } declare @llvm.riscv.vfmax.nxv1f64.f64( + , , double, iXLen); @@ -1189,6 +1242,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1222,6 +1276,7 @@ } declare @llvm.riscv.vfmax.nxv2f64.f64( + , , double, iXLen); @@ -1234,6 +1289,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1267,6 +1323,7 @@ } declare @llvm.riscv.vfmax.nxv4f64.f64( + , , double, iXLen); @@ -1279,6 +1336,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1312,6 +1370,7 @@ } declare @llvm.riscv.vfmax.nxv8f64.f64( + , , double, iXLen); @@ -1324,6 +1383,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfmin.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfmin.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfmin.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfmin.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfmin.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vfmin.nxv1f32.nxv1f32( + , , , iXLen); @@ 
-287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vfmin.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vfmin.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ } declare @llvm.riscv.vfmin.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ } declare @llvm.riscv.vfmin.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ } declare @llvm.riscv.vfmin.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ } declare @llvm.riscv.vfmin.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ } declare @llvm.riscv.vfmin.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ } declare @llvm.riscv.vfmin.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ } declare @llvm.riscv.vfmin.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -727,6 +759,7 @@ } declare @llvm.riscv.vfmin.nxv2f16.f16( + , , half, iXLen); @@ -739,6 +772,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -772,6 +806,7 @@ } declare @llvm.riscv.vfmin.nxv4f16.f16( + , , half, iXLen); @@ -784,6 +819,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -817,6 +853,7 @@ } declare @llvm.riscv.vfmin.nxv8f16.f16( + , , half, iXLen); @@ -829,6 +866,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -862,6 +900,7 @@ } declare @llvm.riscv.vfmin.nxv16f16.f16( + , , half, iXLen); @@ -874,6 +913,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -907,6 +947,7 @@ } declare @llvm.riscv.vfmin.nxv32f16.f16( + , , half, iXLen); @@ -919,6 +960,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -952,6 +994,7 @@ } declare @llvm.riscv.vfmin.nxv1f32.f32( + , , float, iXLen); @@ -964,6 +1007,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -997,6 +1041,7 @@ } declare @llvm.riscv.vfmin.nxv2f32.f32( + , , float, iXLen); @@ -1009,6 +1054,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1042,6 +1088,7 @@ } declare @llvm.riscv.vfmin.nxv4f32.f32( + , , float, iXLen); @@ -1054,6 +1101,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1087,6 +1135,7 @@ } declare 
@llvm.riscv.vfmin.nxv8f32.f32( + , , float, iXLen); @@ -1099,6 +1148,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1132,6 +1182,7 @@ } declare @llvm.riscv.vfmin.nxv16f32.f32( + , , float, iXLen); @@ -1144,6 +1195,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1177,6 +1229,7 @@ } declare @llvm.riscv.vfmin.nxv1f64.f64( + , , double, iXLen); @@ -1189,6 +1242,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1222,6 +1276,7 @@ } declare @llvm.riscv.vfmin.nxv2f64.f64( + , , double, iXLen); @@ -1234,6 +1289,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1267,6 +1323,7 @@ } declare @llvm.riscv.vfmin.nxv4f64.f64( + , , double, iXLen); @@ -1279,6 +1336,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1312,6 +1370,7 @@ } declare @llvm.riscv.vfmin.nxv8f64.f64( + , , double, iXLen); @@ -1324,6 +1383,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfmul.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfmul.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfmul.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfmul.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfmul.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vfmul.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vfmul.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vfmul.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ } declare @llvm.riscv.vfmul.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ } declare 
@llvm.riscv.vfmul.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ } declare @llvm.riscv.vfmul.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ } declare @llvm.riscv.vfmul.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ } declare @llvm.riscv.vfmul.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ } declare @llvm.riscv.vfmul.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ } declare @llvm.riscv.vfmul.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -727,6 +759,7 @@ } declare @llvm.riscv.vfmul.nxv2f16.f16( + , , half, iXLen); @@ -739,6 +772,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -772,6 +806,7 @@ } declare @llvm.riscv.vfmul.nxv4f16.f16( + , , half, iXLen); @@ -784,6 +819,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -817,6 +853,7 @@ } declare @llvm.riscv.vfmul.nxv8f16.f16( + , , half, iXLen); @@ -829,6 +866,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -862,6 +900,7 @@ } declare @llvm.riscv.vfmul.nxv16f16.f16( + , , half, iXLen); @@ -874,6 +913,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -907,6 +947,7 @@ } declare @llvm.riscv.vfmul.nxv32f16.f16( + , , half, iXLen); @@ -919,6 +960,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -952,6 +994,7 @@ } declare @llvm.riscv.vfmul.nxv1f32.f32( + , , float, iXLen); @@ -964,6 +1007,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -997,6 +1041,7 @@ } declare @llvm.riscv.vfmul.nxv2f32.f32( + , , float, iXLen); @@ -1009,6 +1054,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -1042,6 +1088,7 @@ } declare @llvm.riscv.vfmul.nxv4f32.f32( + , , float, iXLen); @@ -1054,6 +1101,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -1087,6 +1135,7 @@ } declare @llvm.riscv.vfmul.nxv8f32.f32( + , , float, iXLen); @@ -1099,6 +1148,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -1132,6 +1182,7 @@ } declare @llvm.riscv.vfmul.nxv16f32.f32( + , , float, iXLen); @@ -1144,6 +1195,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -1177,6 +1229,7 @@ } declare @llvm.riscv.vfmul.nxv1f64.f64( + , , double, iXLen); @@ -1189,6 +1242,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -1222,6 +1276,7 @@ } declare @llvm.riscv.vfmul.nxv2f64.f64( + , , double, iXLen); @@ -1234,6 +1289,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfmul.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -1267,6 +1323,7 @@ } declare @llvm.riscv.vfmul.nxv4f64.f64( + , , double, iXLen); @@ -1279,6 +1336,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -1312,6 +1370,7 @@ } declare @llvm.riscv.vfmul.nxv8f64.f64( + , , double, iXLen); @@ -1324,6 +1383,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrdiv.nxv1f16.f16( + , , half, iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfrdiv.nxv2f16.f16( + , , half, iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfrdiv.nxv4f16.f16( + , , half, iXLen); @@ -106,6 +111,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfrdiv.nxv8f16.f16( + , , half, iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfrdiv.nxv16f16.f16( + , , half, iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfrdiv.nxv32f16.f16( + , , half, iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -274,6 +286,7 @@ } declare @llvm.riscv.vfrdiv.nxv1f32.f32( + , , float, iXLen); @@ -286,6 +299,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -319,6 +333,7 @@ } declare @llvm.riscv.vfrdiv.nxv2f32.f32( + , , float, iXLen); @@ -331,6 +346,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -364,6 +380,7 @@ } declare @llvm.riscv.vfrdiv.nxv4f32.f32( + , , float, iXLen); @@ -376,6 +393,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -409,6 +427,7 @@ } declare @llvm.riscv.vfrdiv.nxv8f32.f32( + , , float, iXLen); @@ -421,6 +440,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -454,6 +474,7 @@ } declare @llvm.riscv.vfrdiv.nxv16f32.f32( + , , float, iXLen); @@ -466,6 +487,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -499,6 +521,7 @@ } declare @llvm.riscv.vfrdiv.nxv1f64.f64( + , , double, iXLen); @@ -511,6 +534,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -544,6 +568,7 @@ } declare @llvm.riscv.vfrdiv.nxv2f64.f64( + , , double, iXLen); @@ -556,6 +581,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vfrdiv.nxv4f64.f64( + , , double, iXLen); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfrdiv.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vfrdiv.nxv8f64.f64( + , , double, iXLen); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfrsub.nxv1f16.f16( + , , half, iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f16.f16( + undef, %0, half %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfrsub.nxv2f16.f16( + , , half, iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f16.f16( + undef, %0, half %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfrsub.nxv4f16.f16( + , , half, iXLen); @@ -106,6 +111,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f16.f16( + undef, %0, half %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfrsub.nxv8f16.f16( + , , half, iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f16.f16( + undef, %0, half %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfrsub.nxv16f16.f16( + , , half, iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv16f16.f16( + undef, %0, half %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfrsub.nxv32f16.f16( + , , half, iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv32f16.f16( + undef, %0, half %1, iXLen %2) @@ -274,6 +286,7 @@ } declare @llvm.riscv.vfrsub.nxv1f32.f32( + , , float, iXLen); @@ -286,6 +299,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f32.f32( + undef, %0, float %1, iXLen %2) @@ -319,6 +333,7 @@ } declare @llvm.riscv.vfrsub.nxv2f32.f32( + , , float, iXLen); @@ -331,6 +346,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f32.f32( + undef, %0, float %1, iXLen %2) @@ -364,6 +380,7 @@ } declare @llvm.riscv.vfrsub.nxv4f32.f32( + , , float, iXLen); @@ -376,6 +393,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f32.f32( + undef, %0, float %1, iXLen %2) @@ -409,6 +427,7 @@ } declare @llvm.riscv.vfrsub.nxv8f32.f32( + , , float, iXLen); @@ -421,6 +440,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f32.f32( + undef, %0, float %1, iXLen %2) @@ -454,6 +474,7 @@ } declare @llvm.riscv.vfrsub.nxv16f32.f32( + , , float, iXLen); @@ -466,6 +487,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv16f32.f32( + undef, %0, float %1, iXLen %2) @@ -499,6 +521,7 @@ } declare @llvm.riscv.vfrsub.nxv1f64.f64( + , , double, iXLen); @@ -511,6 +534,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f64.f64( + undef, %0, double %1, iXLen %2) @@ -544,6 +568,7 @@ } declare @llvm.riscv.vfrsub.nxv2f64.f64( + , , double, iXLen); @@ -556,6 +581,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f64.f64( + undef, %0, double %1, iXLen %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vfrsub.nxv4f64.f64( + , , double, iXLen); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f64.f64( + undef, %0, double %1, iXLen %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vfrsub.nxv8f64.f64( + , , double, iXLen); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfrsub.nxv8f64.f64( + undef, %0, double %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll @@ -4,6 +4,7 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + , , , iXLen); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( + undef, %0, %1, iXLen %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( + , , , iXLen); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16( + undef, %0, %1, iXLen %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( + , , , iXLen); @@ -106,6 +111,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16( + undef, %0, %1, iXLen %2) @@ -139,6 +145,7 @@ } declare @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( + , , , iXLen); @@ -151,6 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16( + undef, %0, %1, iXLen %2) @@ -184,6 +192,7 @@ } declare @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( + , , , iXLen); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16( + undef, %0, %1, iXLen %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( + , , , iXLen); @@ -241,6 +252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16( + undef, %0, %1, iXLen %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( + , , , iXLen); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32( + undef, %0, %1, iXLen %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( + , , , iXLen); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32( + undef, %0, %1, iXLen %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( + , , , iXLen); @@ -377,6 +394,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32( + undef, %0, %1, iXLen %2) @@ -410,6 +428,7 @@ } declare @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( + , , , iXLen); @@ -422,6 +441,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32( + undef, %0, %1, iXLen %2) @@ -455,6 +475,7 @@ } declare @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( + , , , iXLen); @@ -467,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32( + undef, %0, %1, iXLen %2) @@ -501,6 +523,7 @@ } declare @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( + , , , iXLen); @@ -513,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64( + undef, %0, %1, iXLen %2) @@ -546,6 +570,7 @@ } declare @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( + , , , iXLen); @@ -558,6 +583,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64( + undef, %0, %1, iXLen %2) @@ -591,6 +617,7 @@ } declare @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( + , , , iXLen); @@ -603,6 +630,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64( + undef, %0, %1, iXLen %2) @@ -636,6 +664,7 @@ } declare @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( + , , , iXLen); @@ -648,6 +677,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64( + undef, %0, %1, iXLen %2) @@ -682,6 +712,7 @@ } declare @llvm.riscv.vfsgnj.nxv1f16.f16( + , , half, iXLen); @@ -694,6 +725,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.f16( + undef, 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfsgnjn.ll hunks apply the same passthru additions to every other variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfsgnjx.ll hunks apply the same passthru additions to every other variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   half,
   iXLen);
@@ -16,6 +17,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half %1,
     iXLen %2)
[The remaining vfslide1down.ll hunks apply the same passthru additions to every other variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   half,
   iXLen);
@@ -17,6 +18,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half %1,
     iXLen %2)
[The remaining vfslide1up.ll hunks apply the same passthru additions to every other variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
+    <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfsub.ll hunks apply the same passthru additions to every other vector-vector and vector-scalar variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -17,6 +18,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfwadd.ll hunks apply the same passthru additions to every other widening variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfwadd.w.ll hunks, including the tied-operand tests, apply the same passthru additions to every other variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -17,6 +18,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfwmul.ll hunks apply the same passthru additions to every other widening variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   iXLen);
@@ -17,6 +18,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfwsub.ll hunks apply the same passthru additions to every other widening variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll
@@ -4,6 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   iXLen);
@@ -16,6 +17,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     iXLen %2)
[The remaining vfwsub.w.ll hunks, including the tied-operand tests, apply the same passthru additions to every other variant.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[The remaining vmax-rv32.ll hunks apply the same passthru additions to every other integer vector-vector and vector-scalar variant, including the i64 scalar cases.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[The remaining vmax-rv64.ll hunks apply the same passthru additions to every other integer vector-vector and vector-scalar variant.]
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmax.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmax.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmax.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmax.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmax.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmax.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vmax.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vmax.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vmax.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmax.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 
+345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmaxu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmaxu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmaxu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmaxu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmaxu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmaxu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmaxu.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmaxu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmaxu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmaxu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmaxu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmaxu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmaxu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmaxu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmaxu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmaxu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmaxu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmaxu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmaxu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmaxu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vmaxu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vmaxu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vmaxu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmaxu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmaxu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vmaxu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmaxu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmaxu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmaxu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmaxu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmaxu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmaxu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmaxu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmaxu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmaxu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmaxu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmaxu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmaxu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmaxu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv32i16.i16( + undef, %0, 
i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmaxu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmaxu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmaxu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmaxu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmaxu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmaxu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vmaxu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vmaxu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vmaxu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmaxu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmin.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmin.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmin.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmin.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmin.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmin.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmin.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmin.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i16.nxv1i16( + 
undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmin.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmin.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmin.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmin.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmin.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmin.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmin.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmin.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmin.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmin.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmin.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmin.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmin.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmin.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmin.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmin.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmin.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmin.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmin.nxv16i8.i8( + , , i8, 
i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmin.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmin.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmin.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmin.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmin.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmin.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmin.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmin.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmin.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmin.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmin.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmin.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmin.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmin.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vmin.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vmin.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vmin.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmin.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmin.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmin.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmin.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmin.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmin.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmin.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmin.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmin.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmin.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmin.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmin.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmin.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmin.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmin.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmin.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmin.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmin.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vmin.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmin.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmin.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmin.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmin.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmin.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmin.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmin.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmin.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmin.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmin.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmin.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmin.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmin.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmin.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmin.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmin.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmin.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmin.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ 
} declare @llvm.riscv.vmin.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmin.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmin.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmin.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmin.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vmin.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vmin.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vmin.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmin.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vminu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vminu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vminu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vminu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vminu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vminu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vminu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vminu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vminu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 
+426,7 @@ } declare @llvm.riscv.vminu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vminu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vminu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vminu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vminu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vminu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vminu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vminu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vminu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vminu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vminu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vminu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vminu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vminu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vminu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vminu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vminu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vminu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vminu.nxv32i8.i8( + , , i8, 
i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vminu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vminu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vminu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vminu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vminu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vminu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vminu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vminu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vminu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vminu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vminu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vminu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vminu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vminu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vminu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vminu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: 
< %s | FileCheck %s declare @llvm.riscv.vminu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vminu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vminu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vminu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vminu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vminu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vminu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vminu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vminu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vminu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vminu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vminu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vminu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vminu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vminu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vminu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vminu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vminu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vminu.nxv1i64.nxv1i64( + , , , 
i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vminu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vminu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vminu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vminu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vminu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vminu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vminu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vminu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vminu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vminu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vminu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vminu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vminu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vminu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vminu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vminu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vminu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vminu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vminu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vminu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vminu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vminu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vminu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vminu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vminu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vminu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vminu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmul.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmul.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmul.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmul.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmul.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmul.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmul.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmul.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmul.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmul.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vmul.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmul.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmul.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmul.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmul.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmul.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmul.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmul.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmul.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmul.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmul.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmul.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmul.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmul.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmul.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmul.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmul.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmul.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmul.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } 
declare @llvm.riscv.vmul.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmul.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmul.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmul.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmul.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmul.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmul.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmul.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmul.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmul.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmul.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmul.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmul.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vmul.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vmul.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vmul.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmul.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i8.nxv1i8( + undef, %0, %1, i64 
%2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmul.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmul.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmul.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmul.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmul.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmul.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmul.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmul.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmul.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmul.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmul.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmul.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmul.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmul.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmul.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmul.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmul.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmul.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmul.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmul.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmul.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmul.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmul.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmul.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmul.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmul.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmul.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmul.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmul.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmul.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmul.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmul.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmul.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmul.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmul.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmul.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmul.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i32.i32( + undef, %0, i32 
%1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmul.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmul.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmul.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vmul.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vmul.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vmul.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmul.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmulh.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmulh.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i16.nxv8i16( + 
undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmulh.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmulh.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmulh.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmulh.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmulh.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmulh.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmulh.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmulh.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmulh.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmulh.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmulh.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare 
@llvm.riscv.vmulh.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmulh.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmulh.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmulh.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmulh.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmulh.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmulh.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmulh.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmulh.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmulh.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmulh.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmulh.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vmulh.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vmulh.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vmulh.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmulh.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmulh.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmulh.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmulh.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmulh.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmulh.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ 
-905,6 +945,7 @@ } declare @llvm.riscv.vmulh.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmulh.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmulh.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmulh.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmulh.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmulh.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmulh.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmulh.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmulh.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmulh.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmulh.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmulh.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmulh.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmulh.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmulh.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmulh.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmulh.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmulh.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare 
@llvm.riscv.vmulh.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmulh.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmulh.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vmulh.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vmulh.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vmulh.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulh.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmulhsu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: 
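; The vector-scalar variants follow the same pattern. A minimal sketch,
; assuming the usual types for the vmulhsu nxv1i8.i8 variant on rv32
; (i32 vl operand); the wrapper name @sketch_vmulhsu_vx_nxv1i8 is hypothetical.
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
  <vscale x 1 x i8>,  ; passthru (new leading operand)
  <vscale x 1 x i8>,
  i8,
  i32)

define <vscale x 1 x i8> @sketch_vmulhsu_vx_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)
  ret <vscale x 1 x i8> %a
}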
%a = call @llvm.riscv.vmulhsu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8( + undef, 
%0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + 
undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmulhsu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmulhsu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmulhsu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmulhsu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmulhsu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vmulhsu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vmulhsu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vmulhsu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhsu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare 
@llvm.riscv.vmulhu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmulhu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmulhu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmulhu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmulhu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmulhu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmulhu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmulhu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmulhu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmulhu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmulhu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmulhu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare 
@llvm.riscv.vmulhu.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmulhu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmulhu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmulhu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmulhu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmulhu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmulhu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmulhu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmulhu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmulhu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmulhu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmulhu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmulhu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmulhu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vmulhu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vmulhu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vmulhu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll @@ 
-2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vmulhu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vmulhu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vmulhu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmulhu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vmulhu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vmulhu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vmulhu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vmulhu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vmulhu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vmulhu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vmulhu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vmulhu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vmulhu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vmulhu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vmulhu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vmulhu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vmulhu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vmulhu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vmulhu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vmulhu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vmulhu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vmulhu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmulhu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vmulhu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vmulhu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vmulhu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vmulhu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vmulhu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vmulhu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vmulhu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vmulhu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmulhu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( + , , , i32); @@ -150,6 +157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ } declare @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( + , , , i32); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( + , , , i32); @@ -242,6 +253,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16( + , , , i32); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( + , , , i32); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( + , , , i32); @@ -378,6 +395,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -411,6 +429,7 @@ } declare @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( + , , , i32); @@ -424,6 +443,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -457,6 +477,7 @@ } declare @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( + , , , i32); @@ -470,6 +491,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -503,6 +525,7 @@ } declare @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32( + , , , i32); @@ -515,6 +538,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -548,6 +572,7 @@ } declare @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( + , , , i32); @@ -561,6 +586,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -594,6 +620,7 @@ } declare @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( + , , , i32); @@ -607,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -640,6 +668,7 @@ } declare @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( + , , , i32); @@ -653,6 +682,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -686,6 +716,7 @@ } declare @llvm.riscv.vnclip.nxv1i8.nxv1i16( + , , i32, i32); @@ -698,6 +729,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -731,6 +763,7 @@ } declare @llvm.riscv.vnclip.nxv2i8.nxv2i16( + , , i32, i32); @@ -743,6 +776,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -776,6 +810,7 @@ } declare @llvm.riscv.vnclip.nxv4i8.nxv4i16( + , , i32, i32); @@ -788,6 +823,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -821,6 +857,7 @@ } declare @llvm.riscv.vnclip.nxv8i8.nxv8i16( + , , i32, i32); @@ -834,6 +871,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -867,6 +905,7 @@ } declare @llvm.riscv.vnclip.nxv16i8.nxv16i16( + , , i32, i32); @@ -880,6 +919,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -913,6 +953,7 @@ } declare @llvm.riscv.vnclip.nxv32i8.nxv32i16( + , , i32, i32); @@ -926,6 +967,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -959,6 +1001,7 @@ } declare @llvm.riscv.vnclip.nxv1i16.nxv1i32( + , , i32, i32); @@ -971,6 +1014,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1004,6 +1048,7 @@ } declare @llvm.riscv.vnclip.nxv2i16.nxv2i32( + , , i32, i32); @@ -1016,6 +1061,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1049,6 +1095,7 @@ } declare @llvm.riscv.vnclip.nxv4i16.nxv4i32( + , , i32, i32); @@ -1062,6 +1109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1095,6 +1143,7 @@ } declare @llvm.riscv.vnclip.nxv8i16.nxv8i32( + , , i32, i32); @@ -1108,6 +1157,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnclip.nxv8i16.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1141,6 +1191,7 @@ } declare @llvm.riscv.vnclip.nxv16i16.nxv16i32( + , , i32, i32); @@ -1154,6 +1205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1187,6 +1239,7 @@ } declare @llvm.riscv.vnclip.nxv1i32.nxv1i64( + , , i32, i32); @@ -1199,6 +1252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1232,6 +1286,7 @@ } declare @llvm.riscv.vnclip.nxv2i32.nxv2i64( + , , i32, i32); @@ -1245,6 +1300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1278,6 +1334,7 @@ } declare @llvm.riscv.vnclip.nxv4i32.nxv4i64( + , , i32, i32); @@ -1291,6 +1348,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1324,6 +1382,7 @@ } declare @llvm.riscv.vnclip.nxv8i32.nxv8i64( + , , i32, i32); @@ -1337,6 +1396,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1377,6 +1437,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -1409,6 +1470,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -1441,6 +1503,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -1474,6 +1537,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -1507,6 +1571,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -1540,6 +1605,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -1572,6 +1638,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -1604,6 +1671,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -1637,6 +1705,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -1670,6 +1739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -1703,6 +1773,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -1735,6 +1806,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -1768,6 +1840,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -1801,6 +1874,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -1834,6 +1908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( + , , , i64); @@ -150,6 +157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ } declare @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( + , , , i64); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( + , , , i64); @@ -242,6 +253,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16( + , , , i64); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( + , , , i64); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( + , , , i64); @@ -378,6 +395,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -411,6 +429,7 @@ } declare @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( + , , , i64); @@ -424,6 +443,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -457,6 +477,7 @@ } declare @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( + , , , i64); @@ -470,6 +491,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -503,6 +525,7 @@ } declare @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32( + , , , i64); @@ -515,6 +538,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -548,6 +572,7 @@ } declare @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( + , , , i64); @@ -561,6 +586,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -594,6 +620,7 @@ } declare @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( + , , , i64); @@ -607,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -640,6 +668,7 @@ } declare @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( + , , , i64); @@ -653,6 +682,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -686,6 +716,7 @@ } declare @llvm.riscv.vnclip.nxv1i8.nxv1i16( + , , i64, i64); @@ -698,6 +729,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -731,6 +763,7 @@ } declare @llvm.riscv.vnclip.nxv2i8.nxv2i16( + , , i64, i64); @@ -743,6 +776,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -776,6 +810,7 @@ } declare @llvm.riscv.vnclip.nxv4i8.nxv4i16( + , , i64, i64); @@ -788,6 +823,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -821,6 +857,7 @@ } declare @llvm.riscv.vnclip.nxv8i8.nxv8i16( + , , i64, i64); @@ -834,6 +871,7 @@ ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -867,6 +905,7 @@ } declare @llvm.riscv.vnclip.nxv16i8.nxv16i16( + , , i64, i64); @@ -880,6 +919,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -913,6 +953,7 @@ } declare @llvm.riscv.vnclip.nxv32i8.nxv32i16( + , , i64, i64); @@ -926,6 +967,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -959,6 +1001,7 @@ } declare @llvm.riscv.vnclip.nxv1i16.nxv1i32( + , , i64, i64); @@ -971,6 +1014,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1004,6 +1048,7 @@ } declare @llvm.riscv.vnclip.nxv2i16.nxv2i32( + , , i64, i64); @@ -1016,6 +1061,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1049,6 +1095,7 @@ } declare @llvm.riscv.vnclip.nxv4i16.nxv4i32( + , , i64, i64); @@ -1062,6 +1109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1095,6 +1143,7 @@ } declare @llvm.riscv.vnclip.nxv8i16.nxv8i32( + , , i64, i64); @@ -1108,6 +1157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1141,6 +1191,7 @@ } declare @llvm.riscv.vnclip.nxv16i16.nxv16i32( + , , i64, i64); @@ -1154,6 +1205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1187,6 +1239,7 @@ } declare @llvm.riscv.vnclip.nxv1i32.nxv1i64( + , , i64, i64); @@ -1199,6 +1252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1232,6 +1286,7 @@ } declare @llvm.riscv.vnclip.nxv2i32.nxv2i64( + , , i64, i64); @@ -1245,6 +1300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1278,6 +1334,7 @@ } declare @llvm.riscv.vnclip.nxv4i32.nxv4i64( + , , i64, i64); @@ -1291,6 +1348,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1324,6 +1382,7 @@ } declare @llvm.riscv.vnclip.nxv8i32.nxv8i64( + , , i64, i64); @@ -1337,6 +1396,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1377,6 +1437,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i8.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -1409,6 +1470,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i8.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -1441,6 +1503,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i8.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -1474,6 +1537,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -1507,6 +1571,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -1540,6 +1605,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -1572,6 +1638,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i16.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -1604,6 +1671,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i16.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -1637,6 +1705,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -1670,6 +1739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( + undef, %0, i64 9, i64 
%1) @@ -1703,6 +1773,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -1735,6 +1806,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv1i32.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -1768,6 +1840,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -1801,6 +1874,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -1834,6 +1908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( + , , , i32); @@ -150,6 +157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ } declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( + , , , i32); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( + , , , i32); @@ -242,6 +253,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16( + , , , i32); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16( + , , , i32); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( + , , , i32); @@ -378,6 +395,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -411,6 +429,7 @@ } declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( + , , , i32); @@ -424,6 +443,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -457,6 +477,7 @@ } declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( + , , , i32); @@ -470,6 +491,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -503,6 +525,7 @@ } declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32( + , , , i32); @@ -515,6 +538,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -548,6 +572,7 @@ } declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( + , , , i32); @@ -561,6 
+586,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -594,6 +620,7 @@ } declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( + , , , i32); @@ -607,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -640,6 +668,7 @@ } declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( + , , , i32); @@ -653,6 +682,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -686,6 +716,7 @@ } declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16( + , , i32, i32); @@ -698,6 +729,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -731,6 +763,7 @@ } declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16( + , , i32, i32); @@ -743,6 +776,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -776,6 +810,7 @@ } declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16( + , , i32, i32); @@ -788,6 +823,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -821,6 +857,7 @@ } declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16( + , , i32, i32); @@ -834,6 +871,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -867,6 +905,7 @@ } declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16( + , , i32, i32); @@ -880,6 +919,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -913,6 +953,7 @@ } declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16( + , , i32, i32); @@ -926,6 +967,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -959,6 +1001,7 @@ } declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32( + , , i32, i32); @@ -971,6 +1014,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1004,6 +1048,7 @@ } declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32( + , , i32, i32); @@ -1016,6 +1061,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1049,6 +1095,7 @@ } declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32( + , , i32, i32); @@ -1062,6 +1109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1095,6 +1143,7 @@ } declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32( + , , i32, i32); @@ -1108,6 +1157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1141,6 +1191,7 @@ } declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32( + , , i32, i32); @@ -1154,6 +1205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1187,6 +1239,7 @@ } declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64( + , , i32, i32); @@ -1199,6 +1252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1232,6 +1286,7 @@ } declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64( + , , i32, i32); @@ -1245,6 +1300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1278,6 +1334,7 @@ } declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64( + , , i32, i32); @@ -1291,6 +1348,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1324,6 +1382,7 @@ } declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64( + , , i32, i32); @@ -1337,6 +1396,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1377,6 +1437,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -1409,6 +1470,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -1441,6 +1503,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -1474,6 +1537,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -1507,6 +1571,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -1540,6 +1605,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -1572,6 +1638,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -1604,6 +1671,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -1637,6 +1705,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -1670,6 +1739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -1703,6 +1773,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -1735,6 +1806,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -1768,6 +1840,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -1801,6 +1874,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -1834,6 +1908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( + , , , i64); @@ -150,6 +157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ } declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( + , , , i64); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( + , , , i64); @@ -242,6 +253,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16( 
+ , , , i64); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16( + , , , i64); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( + , , , i64); @@ -378,6 +395,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -411,6 +429,7 @@ } declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( + , , , i64); @@ -424,6 +443,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -457,6 +477,7 @@ } declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( + , , , i64); @@ -470,6 +491,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -503,6 +525,7 @@ } declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32( + , , , i64); @@ -515,6 +538,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -548,6 +572,7 @@ } declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( + , , , i64); @@ -561,6 +586,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -594,6 +620,7 @@ } declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( + , , , i64); @@ -607,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -640,6 +668,7 @@ } declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( + , , , i64); @@ -653,6 +682,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -686,6 +716,7 @@ } declare @llvm.riscv.vnclipu.nxv1i8.nxv1i16( + , , i64, i64); @@ -698,6 +729,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -731,6 +763,7 @@ } declare @llvm.riscv.vnclipu.nxv2i8.nxv2i16( + , , i64, i64); @@ -743,6 +776,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -776,6 +810,7 @@ } declare @llvm.riscv.vnclipu.nxv4i8.nxv4i16( + , , i64, i64); @@ -788,6 +823,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -821,6 +857,7 @@ } declare @llvm.riscv.vnclipu.nxv8i8.nxv8i16( + , , i64, i64); @@ -834,6 +871,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -867,6 +905,7 @@ } declare @llvm.riscv.vnclipu.nxv16i8.nxv16i16( + , , i64, i64); @@ -880,6 +919,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -913,6 +953,7 @@ } declare @llvm.riscv.vnclipu.nxv32i8.nxv32i16( + , , i64, i64); @@ -926,6 +967,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -959,6 +1001,7 @@ } declare @llvm.riscv.vnclipu.nxv1i16.nxv1i32( + , , i64, i64); @@ -971,6 +1014,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1004,6 +1048,7 @@ } declare @llvm.riscv.vnclipu.nxv2i16.nxv2i32( + , , i64, i64); @@ -1016,6 +1061,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1049,6 +1095,7 @@ } declare 
@llvm.riscv.vnclipu.nxv4i16.nxv4i32( + , , i64, i64); @@ -1062,6 +1109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1095,6 +1143,7 @@ } declare @llvm.riscv.vnclipu.nxv8i16.nxv8i32( + , , i64, i64); @@ -1108,6 +1157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1141,6 +1191,7 @@ } declare @llvm.riscv.vnclipu.nxv16i16.nxv16i32( + , , i64, i64); @@ -1154,6 +1205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1187,6 +1239,7 @@ } declare @llvm.riscv.vnclipu.nxv1i32.nxv1i64( + , , i64, i64); @@ -1199,6 +1252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1232,6 +1286,7 @@ } declare @llvm.riscv.vnclipu.nxv2i32.nxv2i64( + , , i64, i64); @@ -1245,6 +1300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1278,6 +1334,7 @@ } declare @llvm.riscv.vnclipu.nxv4i32.nxv4i64( + , , i64, i64); @@ -1291,6 +1348,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1324,6 +1382,7 @@ } declare @llvm.riscv.vnclipu.nxv8i32.nxv8i64( + , , i64, i64); @@ -1337,6 +1396,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1377,6 +1437,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -1409,6 +1470,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -1441,6 +1503,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -1474,6 +1537,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -1507,6 +1571,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -1540,6 +1605,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -1572,6 +1638,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -1604,6 +1671,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -1637,6 +1705,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -1670,6 +1739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -1703,6 +1773,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -1735,6 +1806,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -1768,6 +1840,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -1801,6 +1874,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -1834,6 +1908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | 
FileCheck %s declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( + , , , i32); @@ -150,6 +157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -183,6 +191,7 @@ } declare @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( + , , , i32); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + , , , i32); @@ -242,6 +253,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + , , , i32); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + , , , i32); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + , , , i32); @@ -378,6 +395,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -411,6 +429,7 @@ } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + , , , i32); @@ -424,6 +443,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -457,6 +477,7 @@ } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + , , , i32); @@ -470,6 +491,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -503,6 +525,7 @@ } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + , , , i32); @@ -515,6 +538,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -548,6 +572,7 @@ } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + , , , i32); @@ -561,6 +586,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -594,6 +620,7 @@ } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + , , , i32); @@ -607,6 +634,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -640,6 +668,7 @@ } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + , , , i32); @@ -653,6 +682,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -686,6 +716,7 @@ } declare @llvm.riscv.vnsra.nxv1i8.nxv1i16( + , , i32, i32); @@ -698,6 +729,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -731,6 +763,7 @@ } declare @llvm.riscv.vnsra.nxv2i8.nxv2i16( + , , i32, i32); @@ -743,6 +776,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -776,6 +810,7 @@ } declare 
@llvm.riscv.vnsra.nxv4i8.nxv4i16( + , , i32, i32); @@ -788,6 +823,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -821,6 +857,7 @@ } declare @llvm.riscv.vnsra.nxv8i8.nxv8i16( + , , i32, i32); @@ -834,6 +871,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -867,6 +905,7 @@ } declare @llvm.riscv.vnsra.nxv16i8.nxv16i16( + , , i32, i32); @@ -880,6 +919,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -913,6 +953,7 @@ } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16( + , , i32, i32); @@ -926,6 +967,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -959,6 +1001,7 @@ } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32( + , , i32, i32); @@ -971,6 +1014,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1004,6 +1048,7 @@ } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32( + , , i32, i32); @@ -1016,6 +1061,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1049,6 +1095,7 @@ } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32( + , , i32, i32); @@ -1062,6 +1109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1095,6 +1143,7 @@ } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32( + , , i32, i32); @@ -1108,6 +1157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1141,6 +1191,7 @@ } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32( + , , i32, i32); @@ -1154,6 +1205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1187,6 +1239,7 @@ } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64( + , , i32, i32); @@ -1199,6 +1252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1232,6 +1286,7 @@ } declare @llvm.riscv.vnsra.nxv2i32.nxv2i64( + , , i32, i32); @@ -1245,6 +1300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1278,6 +1334,7 @@ } declare @llvm.riscv.vnsra.nxv4i32.nxv4i64( + , , i32, i32); @@ -1291,6 +1348,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1324,6 +1382,7 @@ } declare @llvm.riscv.vnsra.nxv8i32.nxv8i64( + , , i32, i32); @@ -1337,6 +1396,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1377,6 +1437,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -1409,6 +1470,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -1441,6 +1503,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -1474,6 +1537,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -1507,6 +1571,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -1540,6 +1605,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -1572,6 +1638,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -1604,6 +1671,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32( + 
undef, %0, i32 9, i32 %1) @@ -1637,6 +1705,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -1670,6 +1739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -1703,6 +1773,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -1735,6 +1806,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -1768,6 +1840,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -1801,6 +1874,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -1834,6 +1908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( + , , , i64); @@ -150,6 +157,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -183,6 +191,7 @@ } declare @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( + , , , i64); @@ -196,6 +205,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -229,6 +239,7 @@ } declare @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + , , , i64); @@ -242,6 +253,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -275,6 +287,7 @@ } declare @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + , , , i64); @@ -287,6 +300,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -320,6 +334,7 @@ } declare @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + , , , i64); @@ -332,6 +347,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -365,6 +381,7 @@ } declare @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + , , , i64); @@ -378,6 +395,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -411,6 +429,7 @@ } declare @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + , , , i64); @@ -424,6 +443,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -457,6 +477,7 @@ } declare @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + , , , i64); @@ -470,6 +491,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -503,6 +525,7 @@ } declare @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + , , , i64); @@ -515,6 +538,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2)
[... remaining vnsra-rv64.ll hunks: every vnsra declaration gains a leading passthru operand and every vnsra call passes undef for it ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
[... every vnsrl declaration gains a leading passthru operand and every vnsrl call passes undef for it ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
[... same passthru/undef updates for every vnsrl declaration and call ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
[... every vor declaration gains a leading passthru operand and every vor call passes undef for it ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
[... same passthru/undef updates for every vor declaration and call ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
[... every vrem declaration gains a leading passthru operand and every vrem call passes undef for it ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
[... same passthru/undef updates for every vrem declaration and call ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
[... every vremu declaration gains a leading passthru operand and every vremu call passes undef for it ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vremu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vremu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vremu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vremu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vremu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vremu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vremu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vremu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vremu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vremu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vremu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vremu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vremu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vremu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vremu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vremu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vremu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vremu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vremu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vremu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vremu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vremu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vremu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vremu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vremu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vremu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vremu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vremu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vremu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vremu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vremu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vremu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vremu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vremu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vremu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vremu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vremu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i32.i32( + undef, %0, i32 
%1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vremu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vremu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vremu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vremu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vremu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vremu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vremu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vremu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vremu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrgather.vv.nxv1i8.i32( + , , , i32); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i8.i32( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2i8.i32( + , , , i32); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i8.i32( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4i8.i32( + , , , i32); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i8.i32( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8i8.i32( + , , , i32); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i8.i32( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16i8.i32( + , , , i32); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i8.i32( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vrgather.vv.nxv32i8.i32( + , , , i32); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i8.i32( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vrgather.vv.nxv64i8.i32( + , , , i32); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv64i8.i32( + undef, %0, %1, i32 %2) @@ -325,6 +339,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1i16.i32( + , , , i32); @@ -338,6 +353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i16.i32( + undef, %0, %1, i32 %2) @@ -371,6 +387,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2i16.i32( + , , , i32); @@ -384,6 +401,7 
@@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i16.i32( + undef, %0, %1, i32 %2) @@ -417,6 +435,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4i16.i32( + , , , i32); @@ -430,6 +449,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i16.i32( + undef, %0, %1, i32 %2) @@ -463,6 +483,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8i16.i32( + , , , i32); @@ -476,6 +497,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i16.i32( + undef, %0, %1, i32 %2) @@ -509,6 +531,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16i16.i32( + , , , i32); @@ -522,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i16.i32( + undef, %0, %1, i32 %2) @@ -555,6 +579,7 @@ } declare @llvm.riscv.vrgather.vv.nxv32i16.i32( + , , , i32); @@ -568,6 +593,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i16.i32( + undef, %0, %1, i32 %2) @@ -602,6 +628,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1i32.i32( + , , , i32); @@ -615,6 +642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i32.i32( + undef, %0, %1, i32 %2) @@ -648,6 +676,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2i32.i32( + , , , i32); @@ -661,6 +690,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i32.i32( + undef, %0, %1, i32 %2) @@ -694,6 +724,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4i32.i32( + , , , i32); @@ -707,6 +738,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i32.i32( + undef, %0, %1, i32 %2) @@ -740,6 +772,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8i32.i32( + , , , i32); @@ -753,6 +786,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i32.i32( + undef, %0, %1, i32 %2) @@ -786,6 +820,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16i32.i32( + , , , i32); @@ -799,6 +834,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i32.i32( + undef, %0, %1, i32 %2) @@ -833,6 +869,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1f16.i32( + , , , i32); @@ -846,6 +883,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f16.i32( + undef, %0, %1, i32 %2) @@ -879,6 +917,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2f16.i32( + , , , i32); @@ -892,6 +931,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f16.i32( + undef, %0, %1, i32 %2) @@ -925,6 +965,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4f16.i32( + , , , i32); @@ -938,6 +979,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f16.i32( + undef, %0, %1, i32 %2) @@ -971,6 +1013,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8f16.i32( + , , , i32); @@ -984,6 +1027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f16.i32( + undef, %0, %1, i32 %2) @@ -1017,6 +1061,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16f16.i32( + , , , i32); @@ -1030,6 +1075,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f16.i32( + undef, %0, %1, i32 %2) @@ -1063,6 +1109,7 @@ } declare @llvm.riscv.vrgather.vv.nxv32f16.i32( + , , , i32); @@ -1076,6 +1123,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32f16.i32( + undef, %0, %1, i32 %2) @@ -1110,6 +1158,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1f32.i32( + , , , i32); @@ -1123,6 +1172,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f32.i32( + undef, %0, %1, i32 %2) @@ -1156,6 +1206,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2f32.i32( + , , , i32); @@ -1169,6 +1220,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f32.i32( + undef, %0, %1, i32 %2) @@ -1202,6 +1254,7 @@ } declare 
@llvm.riscv.vrgather.vv.nxv4f32.i32( + , , , i32); @@ -1215,6 +1268,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f32.i32( + undef, %0, %1, i32 %2) @@ -1248,6 +1302,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8f32.i32( + , , , i32); @@ -1261,6 +1316,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f32.i32( + undef, %0, %1, i32 %2) @@ -1294,6 +1350,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16f32.i32( + , , , i32); @@ -1307,6 +1364,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i32( + undef, %0, %1, i32 %2) @@ -1341,6 +1399,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1f64.i32( + , , , i32); @@ -1354,6 +1413,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f64.i32( + undef, %0, %1, i32 %2) @@ -1387,6 +1447,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2f64.i32( + , , , i32); @@ -1400,6 +1461,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f64.i32( + undef, %0, %1, i32 %2) @@ -1433,6 +1495,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4f64.i32( + , , , i32); @@ -1446,6 +1509,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f64.i32( + undef, %0, %1, i32 %2) @@ -1479,6 +1543,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8f64.i32( + , , , i32); @@ -1492,6 +1557,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f64.i32( + undef, %0, %1, i32 %2) @@ -1526,6 +1592,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1i8.i32( + , , i32, i32); @@ -1539,6 +1606,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1572,6 +1640,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2i8.i32( + , , i32, i32); @@ -1585,6 +1654,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1618,6 +1688,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4i8.i32( + , , i32, i32); @@ -1631,6 +1702,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1664,6 +1736,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8i8.i32( + , , i32, i32); @@ -1677,6 +1750,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1710,6 +1784,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16i8.i32( + , , i32, i32); @@ -1723,6 +1798,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1756,6 +1832,7 @@ } declare @llvm.riscv.vrgather.vx.nxv32i8.i32( + , , i32, i32); @@ -1769,6 +1846,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1802,6 +1880,7 @@ } declare @llvm.riscv.vrgather.vx.nxv64i8.i32( + , , i32, i32); @@ -1815,6 +1894,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i32( + undef, %0, i32 %1, i32 %2) @@ -1848,6 +1928,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1i16.i32( + , , i32, i32); @@ -1861,6 +1942,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i16.i32( + undef, %0, i32 %1, i32 %2) @@ -1894,6 +1976,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2i16.i32( + , , i32, i32); @@ -1907,6 +1990,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i16.i32( + undef, %0, i32 %1, i32 %2) @@ -1940,6 +2024,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4i16.i32( + , , i32, i32); @@ -1953,6 +2038,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i32( + undef, %0, i32 %1, i32 %2) @@ -1986,6 +2072,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8i16.i32( + , , i32, i32); @@ 
-1999,6 +2086,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i32( + undef, %0, i32 %1, i32 %2) @@ -2032,6 +2120,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16i16.i32( + , , i32, i32); @@ -2045,6 +2134,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i32( + undef, %0, i32 %1, i32 %2) @@ -2078,6 +2168,7 @@ } declare @llvm.riscv.vrgather.vx.nxv32i16.i32( + , , i32, i32); @@ -2091,6 +2182,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i32( + undef, %0, i32 %1, i32 %2) @@ -2124,6 +2216,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1i32.i32( + , , i32, i32); @@ -2137,6 +2230,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2170,6 +2264,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2i32.i32( + , , i32, i32); @@ -2183,6 +2278,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2216,6 +2312,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4i32.i32( + , , i32, i32); @@ -2229,6 +2326,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2262,6 +2360,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8i32.i32( + , , i32, i32); @@ -2275,6 +2374,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2308,6 +2408,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16i32.i32( + , , i32, i32); @@ -2321,6 +2422,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -2354,6 +2456,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1f16.i32( + , , i32, i32); @@ -2367,6 +2470,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2400,6 +2504,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2f16.i32( + , , i32, i32); @@ -2413,6 +2518,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2446,6 +2552,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4f16.i32( + , , i32, i32); @@ -2459,6 +2566,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2492,6 +2600,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8f16.i32( + , , i32, i32); @@ -2505,6 +2614,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2538,6 +2648,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16f16.i32( + , , i32, i32); @@ -2551,6 +2662,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2584,6 +2696,7 @@ } declare @llvm.riscv.vrgather.vx.nxv32f16.i32( + , , i32, i32); @@ -2597,6 +2710,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i32( + undef, %0, i32 %1, i32 %2) @@ -2630,6 +2744,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1f32.i32( + , , i32, i32); @@ -2643,6 +2758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2676,6 +2792,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2f32.i32( + , , i32, i32); @@ -2689,6 +2806,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2722,6 +2840,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4f32.i32( + , , i32, i32); @@ -2735,6 +2854,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2768,6 +2888,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8f32.i32( + , , i32, 
i32); @@ -2781,6 +2902,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2814,6 +2936,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16f32.i32( + , , i32, i32); @@ -2827,6 +2950,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i32( + undef, %0, i32 %1, i32 %2) @@ -2860,6 +2984,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1f64.i32( + , , i32, i32); @@ -2873,6 +2998,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i32( + undef, %0, i32 %1, i32 %2) @@ -2906,6 +3032,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2f64.i32( + , , i32, i32); @@ -2919,6 +3046,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i32( + undef, %0, i32 %1, i32 %2) @@ -2952,6 +3080,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4f64.i32( + , , i32, i32); @@ -2965,6 +3094,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i32( + undef, %0, i32 %1, i32 %2) @@ -2998,6 +3128,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8f64.i32( + , , i32, i32); @@ -3011,6 +3142,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i32( + undef, %0, i32 %1, i32 %2) @@ -3052,6 +3184,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i8.i32( + undef, %0, i32 9, i32 %1) @@ -3085,6 +3218,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i8.i32( + undef, %0, i32 9, i32 %1) @@ -3118,6 +3252,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i8.i32( + undef, %0, i32 9, i32 %1) @@ -3151,6 +3286,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i32( + undef, %0, i32 9, i32 %1) @@ -3184,6 +3320,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i32( + undef, %0, i32 9, i32 %1) @@ -3217,6 +3354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i32( + undef, %0, i32 9, i32 %1) @@ -3250,6 +3388,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i32( + undef, %0, i32 9, i32 %1) @@ -3283,6 +3422,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i16.i32( + undef, %0, i32 9, i32 %1) @@ -3316,6 +3456,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i16.i32( + undef, %0, i32 9, i32 %1) @@ -3349,6 +3490,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i32( + undef, %0, i32 9, i32 %1) @@ -3382,6 +3524,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i32( + undef, %0, i32 9, i32 %1) @@ -3415,6 +3558,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i32( + undef, %0, i32 9, i32 %1) @@ -3448,6 +3592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i32( + undef, %0, i32 9, i32 %1) @@ -3481,6 +3626,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -3514,6 +3660,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -3547,6 +3694,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -3580,6 +3728,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -3613,6 +3762,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -3646,6 +3796,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f16.i32( + undef, %0, i32 9, i32 %1) @@ -3679,6 +3830,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgather.vx.nxv2f16.i32( + undef, %0, i32 9, i32 %1) @@ -3712,6 +3864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i32( + undef, %0, i32 9, i32 %1) @@ -3745,6 +3898,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i32( + undef, %0, i32 9, i32 %1) @@ -3778,6 +3932,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i32( + undef, %0, i32 9, i32 %1) @@ -3811,6 +3966,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i32( + undef, %0, i32 9, i32 %1) @@ -3844,6 +4000,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f32.i32( + undef, %0, i32 9, i32 %1) @@ -3877,6 +4034,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i32( + undef, %0, i32 9, i32 %1) @@ -3910,6 +4068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i32( + undef, %0, i32 9, i32 %1) @@ -3943,6 +4102,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i32( + undef, %0, i32 9, i32 %1) @@ -3976,6 +4136,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i32( + undef, %0, i32 9, i32 %1) @@ -4009,6 +4170,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i32( + undef, %0, i32 9, i32 %1) @@ -4042,6 +4204,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i32( + undef, %0, i32 9, i32 %1) @@ -4075,6 +4238,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i32( + undef, %0, i32 9, i32 %1) @@ -4108,6 +4272,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i32( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrgather.vv.nxv1i8.i64( + , , , i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i8.i64( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2i8.i64( + , , , i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i8.i64( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4i8.i64( + , , , i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i8.i64( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8i8.i64( + , , , i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i8.i64( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16i8.i64( + , , , i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i8.i64( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vrgather.vv.nxv32i8.i64( + , , , i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i8.i64( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vrgather.vv.nxv64i8.i64( + , , , i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv64i8.i64( + undef, %0, %1, i64 %2) @@ -325,6 +339,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1i16.i64( + , , , i64); @@ -338,6 +353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i16.i64( + undef, %0, %1, i64 %2) @@ -371,6 +387,7 @@ } declare 
@llvm.riscv.vrgather.vv.nxv2i16.i64( + , , , i64); @@ -384,6 +401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i16.i64( + undef, %0, %1, i64 %2) @@ -417,6 +435,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4i16.i64( + , , , i64); @@ -430,6 +449,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i16.i64( + undef, %0, %1, i64 %2) @@ -463,6 +483,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8i16.i64( + , , , i64); @@ -476,6 +497,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i16.i64( + undef, %0, %1, i64 %2) @@ -509,6 +531,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16i16.i64( + , , , i64); @@ -522,6 +545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i16.i64( + undef, %0, %1, i64 %2) @@ -555,6 +579,7 @@ } declare @llvm.riscv.vrgather.vv.nxv32i16.i64( + , , , i64); @@ -568,6 +593,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i16.i64( + undef, %0, %1, i64 %2) @@ -602,6 +628,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1i32.i64( + , , , i64); @@ -615,6 +642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i32.i64( + undef, %0, %1, i64 %2) @@ -648,6 +676,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2i32.i64( + , , , i64); @@ -661,6 +690,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i32.i64( + undef, %0, %1, i64 %2) @@ -694,6 +724,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4i32.i64( + , , , i64); @@ -707,6 +738,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i32.i64( + undef, %0, %1, i64 %2) @@ -740,6 +772,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8i32.i64( + , , , i64); @@ -753,6 +786,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i32.i64( + undef, %0, %1, i64 %2) @@ -786,6 +820,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16i32.i64( + , , , i64); @@ -799,6 +834,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i32.i64( + undef, %0, %1, i64 %2) @@ -833,6 +869,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1i64.i64( + , , , i64); @@ -846,6 +883,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i64.i64( + undef, %0, %1, i64 %2) @@ -879,6 +917,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2i64.i64( + , , , i64); @@ -892,6 +931,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i64.i64( + undef, %0, %1, i64 %2) @@ -925,6 +965,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4i64.i64( + , , , i64); @@ -938,6 +979,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i64.i64( + undef, %0, %1, i64 %2) @@ -971,6 +1013,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8i64.i64( + , , , i64); @@ -984,6 +1027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i64.i64( + undef, %0, %1, i64 %2) @@ -1018,6 +1062,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1f16.i64( + , , , i64); @@ -1031,6 +1076,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f16.i64( + undef, %0, %1, i64 %2) @@ -1064,6 +1110,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2f16.i64( + , , , i64); @@ -1077,6 +1124,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f16.i64( + undef, %0, %1, i64 %2) @@ -1110,6 +1158,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4f16.i64( + , , , i64); @@ -1123,6 +1172,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f16.i64( + undef, %0, %1, i64 %2) @@ -1156,6 +1206,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8f16.i64( + , , , i64); @@ -1169,6 +1220,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f16.i64( + undef, %0, %1, i64 
%2) @@ -1202,6 +1254,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16f16.i64( + , , , i64); @@ -1215,6 +1268,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f16.i64( + undef, %0, %1, i64 %2) @@ -1248,6 +1302,7 @@ } declare @llvm.riscv.vrgather.vv.nxv32f16.i64( + , , , i64); @@ -1261,6 +1316,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32f16.i64( + undef, %0, %1, i64 %2) @@ -1295,6 +1351,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1f32.i64( + , , , i64); @@ -1308,6 +1365,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f32.i64( + undef, %0, %1, i64 %2) @@ -1341,6 +1399,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2f32.i64( + , , , i64); @@ -1354,6 +1413,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f32.i64( + undef, %0, %1, i64 %2) @@ -1387,6 +1447,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4f32.i64( + , , , i64); @@ -1400,6 +1461,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f32.i64( + undef, %0, %1, i64 %2) @@ -1433,6 +1495,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8f32.i64( + , , , i64); @@ -1446,6 +1509,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f32.i64( + undef, %0, %1, i64 %2) @@ -1479,6 +1543,7 @@ } declare @llvm.riscv.vrgather.vv.nxv16f32.i64( + , , , i64); @@ -1492,6 +1557,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i64( + undef, %0, %1, i64 %2) @@ -1526,6 +1592,7 @@ } declare @llvm.riscv.vrgather.vv.nxv1f64.i64( + , , , i64); @@ -1539,6 +1606,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f64.i64( + undef, %0, %1, i64 %2) @@ -1572,6 +1640,7 @@ } declare @llvm.riscv.vrgather.vv.nxv2f64.i64( + , , , i64); @@ -1585,6 +1654,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f64.i64( + undef, %0, %1, i64 %2) @@ -1618,6 +1688,7 @@ } declare @llvm.riscv.vrgather.vv.nxv4f64.i64( + , , , i64); @@ -1631,6 +1702,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f64.i64( + undef, %0, %1, i64 %2) @@ -1664,6 +1736,7 @@ } declare @llvm.riscv.vrgather.vv.nxv8f64.i64( + , , , i64); @@ -1677,6 +1750,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f64.i64( + undef, %0, %1, i64 %2) @@ -1711,6 +1785,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1i8.i64( + , , i64, i64); @@ -1724,6 +1799,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1757,6 +1833,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2i8.i64( + , , i64, i64); @@ -1770,6 +1847,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1803,6 +1881,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4i8.i64( + , , i64, i64); @@ -1816,6 +1895,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1849,6 +1929,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8i8.i64( + , , i64, i64); @@ -1862,6 +1943,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1895,6 +1977,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16i8.i64( + , , i64, i64); @@ -1908,6 +1991,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2025,7 @@ } declare @llvm.riscv.vrgather.vx.nxv32i8.i64( + , , i64, i64); @@ -1954,6 +2039,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i64( + undef, %0, i64 %1, i64 %2) @@ -1987,6 +2073,7 @@ } declare @llvm.riscv.vrgather.vx.nxv64i8.i64( + , , 
i64, i64); @@ -2000,6 +2087,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i64( + undef, %0, i64 %1, i64 %2) @@ -2033,6 +2121,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1i16.i64( + , , i64, i64); @@ -2046,6 +2135,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2079,6 +2169,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2i16.i64( + , , i64, i64); @@ -2092,6 +2183,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2125,6 +2217,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4i16.i64( + , , i64, i64); @@ -2138,6 +2231,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2171,6 +2265,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8i16.i64( + , , i64, i64); @@ -2184,6 +2279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2217,6 +2313,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16i16.i64( + , , i64, i64); @@ -2230,6 +2327,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2263,6 +2361,7 @@ } declare @llvm.riscv.vrgather.vx.nxv32i16.i64( + , , i64, i64); @@ -2276,6 +2375,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i64( + undef, %0, i64 %1, i64 %2) @@ -2309,6 +2409,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1i32.i64( + , , i64, i64); @@ -2322,6 +2423,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2355,6 +2457,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2i32.i64( + , , i64, i64); @@ -2368,6 +2471,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2401,6 +2505,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4i32.i64( + , , i64, i64); @@ -2414,6 +2519,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2447,6 +2553,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8i32.i64( + , , i64, i64); @@ -2460,6 +2567,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2493,6 +2601,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16i32.i64( + , , i64, i64); @@ -2506,6 +2615,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i64( + undef, %0, i64 %1, i64 %2) @@ -2539,6 +2649,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1i64.i64( + , , i64, i64); @@ -2552,6 +2663,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2585,6 +2697,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2i64.i64( + , , i64, i64); @@ -2598,6 +2711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2631,6 +2745,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4i64.i64( + , , i64, i64); @@ -2644,6 +2759,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2677,6 +2793,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8i64.i64( + , , i64, i64); @@ -2690,6 +2807,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -2723,6 +2841,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1f16.i64( + , , i64, i64); @@ -2736,6 +2855,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2769,6 +2889,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2f16.i64( + 
, , i64, i64); @@ -2782,6 +2903,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2815,6 +2937,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4f16.i64( + , , i64, i64); @@ -2828,6 +2951,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2861,6 +2985,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8f16.i64( + , , i64, i64); @@ -2874,6 +2999,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2907,6 +3033,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16f16.i64( + , , i64, i64); @@ -2920,6 +3047,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2953,6 +3081,7 @@ } declare @llvm.riscv.vrgather.vx.nxv32f16.i64( + , , i64, i64); @@ -2966,6 +3095,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i64( + undef, %0, i64 %1, i64 %2) @@ -2999,6 +3129,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1f32.i64( + , , i64, i64); @@ -3012,6 +3143,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3045,6 +3177,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2f32.i64( + , , i64, i64); @@ -3058,6 +3191,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3091,6 +3225,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4f32.i64( + , , i64, i64); @@ -3104,6 +3239,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3137,6 +3273,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8f32.i64( + , , i64, i64); @@ -3150,6 +3287,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3183,6 +3321,7 @@ } declare @llvm.riscv.vrgather.vx.nxv16f32.i64( + , , i64, i64); @@ -3196,6 +3335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i64( + undef, %0, i64 %1, i64 %2) @@ -3229,6 +3369,7 @@ } declare @llvm.riscv.vrgather.vx.nxv1f64.i64( + , , i64, i64); @@ -3242,6 +3383,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3275,6 +3417,7 @@ } declare @llvm.riscv.vrgather.vx.nxv2f64.i64( + , , i64, i64); @@ -3288,6 +3431,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3321,6 +3465,7 @@ } declare @llvm.riscv.vrgather.vx.nxv4f64.i64( + , , i64, i64); @@ -3334,6 +3479,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3367,6 +3513,7 @@ } declare @llvm.riscv.vrgather.vx.nxv8f64.i64( + , , i64, i64); @@ -3380,6 +3527,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i64( + undef, %0, i64 %1, i64 %2) @@ -3421,6 +3569,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i8.i64( + undef, %0, i64 9, i64 %1) @@ -3454,6 +3603,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i8.i64( + undef, %0, i64 9, i64 %1) @@ -3487,6 +3637,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i8.i64( + undef, %0, i64 9, i64 %1) @@ -3520,6 +3671,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i64( + undef, %0, i64 9, i64 %1) @@ -3553,6 +3705,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i64( + undef, %0, i64 9, i64 %1) @@ -3586,6 +3739,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgather.vx.nxv32i8.i64( + undef, %0, i64 9, i64 %1) @@ -3619,6 +3773,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i64( + undef, %0, i64 9, i64 %1) @@ -3652,6 +3807,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i16.i64( + undef, %0, i64 9, i64 %1) @@ -3685,6 +3841,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i16.i64( + undef, %0, i64 9, i64 %1) @@ -3718,6 +3875,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i64( + undef, %0, i64 9, i64 %1) @@ -3751,6 +3909,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i64( + undef, %0, i64 9, i64 %1) @@ -3784,6 +3943,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i64( + undef, %0, i64 9, i64 %1) @@ -3817,6 +3977,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i64( + undef, %0, i64 9, i64 %1) @@ -3850,6 +4011,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i32.i64( + undef, %0, i64 9, i64 %1) @@ -3883,6 +4045,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i64( + undef, %0, i64 9, i64 %1) @@ -3916,6 +4079,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i64( + undef, %0, i64 9, i64 %1) @@ -3949,6 +4113,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i64( + undef, %0, i64 9, i64 %1) @@ -3982,6 +4147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i64( + undef, %0, i64 9, i64 %1) @@ -4015,6 +4181,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -4048,6 +4215,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -4081,6 +4249,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -4114,6 +4283,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i64.i64( + undef, %0, i64 9, i64 %1) @@ -4147,6 +4317,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f16.i64( + undef, %0, i64 9, i64 %1) @@ -4180,6 +4351,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f16.i64( + undef, %0, i64 9, i64 %1) @@ -4213,6 +4385,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i64( + undef, %0, i64 9, i64 %1) @@ -4246,6 +4419,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i64( + undef, %0, i64 9, i64 %1) @@ -4279,6 +4453,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i64( + undef, %0, i64 9, i64 %1) @@ -4312,6 +4487,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i64( + undef, %0, i64 9, i64 %1) @@ -4345,6 +4521,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f32.i64( + undef, %0, i64 9, i64 %1) @@ -4378,6 +4555,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i64( + undef, %0, i64 9, i64 %1) @@ -4411,6 +4589,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i64( + undef, %0, i64 9, i64 %1) @@ -4444,6 +4623,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i64( + undef, %0, i64 9, i64 %1) @@ -4477,6 +4657,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i64( + undef, %0, i64 9, i64 %1) @@ -4510,6 +4691,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i64( + undef, %0, i64 9, i64 %1) @@ -4543,6 +4725,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgather.vx.nxv2f64.i64( + undef, %0, i64 9, i64 %1) @@ -4576,6 +4759,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i64( + undef, %0, i64 9, i64 %1) @@ -4609,6 +4793,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vrgatherei16.vv.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv32i16( + , , , i32); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i16( + undef, %0, %1, i32 %2) @@ -555,6 +579,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv1i32( + , , , i32); @@ -568,6 +593,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv1i32( + undef, %0, %1, i32 %2) @@ -601,6 +627,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv4i32( + , , , i32); @@ -614,6 +641,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i32( + undef, %0, %1, i32 %2) @@ -647,6 +675,7 @@ } declare @llvm.riscv.vrgatherei16.vv.nxv8i32( + , , , i32); @@ -660,6 +689,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgatherei16.vv.nxv8i32(
+    <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   i64);
@@ -15,6 +16,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
+2279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -2217,6 +2312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -2249,6 +2345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -2473,6 +2576,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -2505,6 +2609,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -2537,6 +2642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -2569,6 +2675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsaddu.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -7,12 +7,12 @@ declare i64 @llvm.riscv.vsetvli(i64, i64, i64) -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64(, , i64) -declare @llvm.riscv.vfadd.nxv2f32.nxv2f32(, , i64) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64(, , , i64) +declare @llvm.riscv.vfadd.nxv2f32.nxv2f32(, , , i64) -declare @llvm.riscv.vfsub.nxv1f64.nxv1f64(, , i64) +declare @llvm.riscv.vfsub.nxv1f64.nxv1f64(, , , i64) -declare @llvm.riscv.vfmul.nxv1f64.nxv1f64(, , i64) +declare @llvm.riscv.vfmul.nxv1f64.nxv1f64(, , , i64) declare @llvm.riscv.vfmv.v.f.nxv1f64.f64(double, i64) declare @llvm.riscv.vfmv.v.f.nxv2f32.f32(float, i64) @@ -37,11 +37,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -70,16 +70,16 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call 
@llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then %c.0 = phi [ %1, %if.then ], [ %2, %if.else ] - %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, i64 %0) + %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %0) ret %3 } @@ -103,18 +103,18 @@ if.then: ; preds = %entry %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0) - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry %2 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0) - %3 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %2) + %3 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %2) br label %if.end if.end: ; preds = %if.else, %if.then %vl.0 = phi i64 [ %0, %if.then], [ %2, %if.else ] %c.0 = phi [ %1, %if.then ], [ %3, %if.else ] - %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, i64 %vl.0) + %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %vl.0) ret %4 } @@ -158,7 +158,7 @@ if.then: ; preds = %entry %0 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %avl) %1 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %avl) - %2 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %0, %1, i64 %avl) + %2 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %0, %1, i64 %avl) %3 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv1f64( %2, * %3, i64 %avl) br label %if.end @@ -166,13 +166,13 @@ if.else: ; preds = %entry %4 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %avl) %5 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %avl) - %6 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( %4, %5, i64 %avl) + %6 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( undef, %4, %5, i64 %avl) %7 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv2f32( %6, * %7, i64 %avl) br label %if.end if.end: ; preds = %if.else, %if.then - %8 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %l, %r, i64 %avl) + %8 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %l, %r, i64 %avl) ret %8 } @@ -204,11 +204,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -218,11 +218,11 @@ br i1 %tobool3, label %if.else5, label %if.then4 if.then4: ; preds = %if.end - %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, i64 %0) + %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %0) br label %if.end6 if.else5: ; preds = %if.end - %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %a, %c.0, i64 %0) + %4 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %a, %c.0, i64 %0) br label %if.end6 if.end6: ; preds = %if.else5, %if.then4 @@ -284,11 +284,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, 
i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -301,7 +301,7 @@ %3 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0) %4 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 1.000000e+00, i64 %3) %5 = tail call @llvm.riscv.vfmv.v.f.nxv1f64.f64(double 2.000000e+00, i64 %3) - %6 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %4, %5, i64 %3) + %6 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %4, %5, i64 %3) %7 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv1f64( %6, * %7, i64 %3) br label %if.end10 @@ -310,13 +310,13 @@ %8 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0) %9 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 1.000000e+00, i64 %8) %10 = tail call @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 2.000000e+00, i64 %8) - %11 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( %9, %10, i64 %8) + %11 = tail call @llvm.riscv.vfadd.nxv2f32.nxv2f32( undef, %9, %10, i64 %8) %12 = bitcast i8* @scratch to * tail call void @llvm.riscv.vse.nxv2f32( %11, * %12, i64 %8) br label %if.end10 if.end10: ; preds = %if.else5, %if.then4 - %13 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %c.0, i64 %0) + %13 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %c.0, i64 %0) ret %13 } @@ -368,12 +368,12 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.else: ; preds = %entry call void @foo() - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then @@ -428,17 +428,17 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( %a, %b, i64 %0) + %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) call void @foo() br label %if.end if.else: ; preds = %entry - %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( %a, %b, i64 %0) + %2 = tail call @llvm.riscv.vfsub.nxv1f64.nxv1f64( undef, %a, %b, i64 %0) br label %if.end if.end: ; preds = %if.else, %if.then %c.0 = phi [ %1, %if.then ], [ %2, %if.else ] - %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( %c.0, %a, i64 %0) + %3 = tail call @llvm.riscv.vfmul.nxv1f64.nxv1f64( undef, %c.0, %a, i64 %0) ret %3 } @@ -520,18 +520,18 @@ if: %b = call @llvm.riscv.vle.nxv2i16( undef, * %y, i64 %vl) - %c = call @llvm.riscv.vwadd.nxv2i32( %b, i16 0, i64 %vl) + %c = call @llvm.riscv.vwadd.nxv2i32( undef, %b, i16 0, i64 %vl) br label %if.end if.end: %d = phi [ %z, %entry ], [ %c, %if ] - %e = call @llvm.riscv.vadd.nxv2i32( %a, %d, i64 %vl) + %e = call @llvm.riscv.vadd.nxv2i32( undef, %a, %d, i64 %vl) ret %e } declare @llvm.riscv.vle.nxv2i32(, *, i64) declare @llvm.riscv.vle.nxv2i16(, *, i64) -declare @llvm.riscv.vwadd.nxv2i32(, i16, i64) -declare @llvm.riscv.vadd.nxv2i32(, , i64) +declare @llvm.riscv.vwadd.nxv2i32(, , i16, i64) +declare @llvm.riscv.vadd.nxv2i32(, , , i64) ; We can use X0, X0 vsetvli in if2 and if2.end. The merge point as if.end will ; see two different vtypes with the same SEW/LMUL ratio. 
At if2.end we will only @@ -566,7 +566,7 @@ if: %b = call @llvm.riscv.vle.nxv2i16( undef, * %y, i64 %vl) - %c = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( %a, %b, i64 %vl) + %c = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( undef, %a, %b, i64 %vl) br label %if.end if.end: @@ -575,12 +575,12 @@ if2: %e = call @llvm.riscv.vle.nxv2i16( undef, * %z, i64 %vl) - %f = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( %d, %e, i64 %vl) + %f = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( undef, %d, %e, i64 %vl) br label %if2.end if2.end: %g = phi [ %d, %if.end ], [ %f, %if2 ] - %h = call @llvm.riscv.vadd.nxv2i32( %g, %w, i64 %vl) + %h = call @llvm.riscv.vadd.nxv2i32( undef, %g, %w, i64 %vl) ret %h } -declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(, , i64) +declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(, , , i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -15,11 +15,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) br label %if.end if.else: ; preds = %entry - %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) br label %if.end if.end: ; preds = %if.else, %if.then @@ -56,11 +56,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %2) + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %2) br label %if.end if.else: ; preds = %entry - %b = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( %1, %1, i64 %2) + %b = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, %1, %1, i64 %2) br label %if.end if.end: ; preds = %if.else, %if.then @@ -79,11 +79,11 @@ br i1 %tobool, label %if.else, label %if.then if.then: ; preds = %entry - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %vl) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %vl) br label %if.end if.else: ; preds = %entry - %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( %0, %1, i64 %vl) + %c = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %vl) br label %if.end if.end: ; preds = %if.else, %if.then @@ -125,10 +125,10 @@ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) ; Function Attrs: nounwind readnone - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , i64) #1 + declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 ; Function Attrs: nounwind readnone - declare @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(, , i64) #1 + declare @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(, , , i64) #1 ; Function Attrs: nounwind readonly declare @llvm.riscv.vle.nxv1i64.i64(, * nocapture, i64) #3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -5,6 +5,7 @@ declare i64 @llvm.riscv.vsetvli(i64, i64, i64) declare i64 @llvm.riscv.vsetvlimax(i64, i64) declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( + , , , i64) @@ -23,6 +24,7 @@ entry: %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7) %1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %a, %b, i64 %0) @@ -38,6 +40,7 @@ entry: %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7) 
%1 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %a, %b, i64 %avl) @@ -247,6 +250,7 @@ ; CHECK-NEXT: ret entry: %0 = tail call @llvm.riscv.vfadd.nxv1f64.nxv1f64( + undef, %a, %b, i64 -1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -10,14 +10,14 @@ define @add( %0, %1, i64 %2) #0 { entry: - %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %2) + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %2) ret %a } define @load_add(* %0, %1, i64 %2) #0 { entry: %a = call @llvm.riscv.vle.nxv1i64.i64( undef, * %0, i64 %2) - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) ret %b } @@ -60,7 +60,7 @@ define @vsetvli_add( %0, %1, i64 %avl) #0 { entry: %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0) - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %0, %1, i64 %a) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %0, %1, i64 %a) ret %b } @@ -68,12 +68,12 @@ entry: %a = call @llvm.riscv.vle.nxv1i64.i64( undef, * %0, i64 %2) call void asm sideeffect "", ""() - %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( %a, %1, i64 %2) + %b = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, %a, %1, i64 %2) ret %b } ; Function Attrs: nounwind readnone - declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , i64) #1 + declare @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(, , , i64) #1 ; Function Attrs: nounwind readonly declare @llvm.riscv.vle.nxv1i64.i64(, * nocapture, i64) #4 diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1down.nxv1i8.i8( + , , i8, i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vslide1down.nxv2i8.i8( + , , i8, i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vslide1down.nxv4i8.i8( + , , i8, i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vslide1down.nxv8i8.i8( + , , i8, i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vslide1down.nxv16i8.i8( + , , i8, i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vslide1down.nxv32i8.i8( + , , i8, i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vslide1down.nxv64i8.i8( + , , i8, i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -317,6 +331,7 @@ } declare @llvm.riscv.vslide1down.nxv1i16.i16( + , , i16, i32); @@ -329,6 +344,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i16.i16( + 
undef, %0, i16 %1, i32 %2) @@ -362,6 +378,7 @@ } declare @llvm.riscv.vslide1down.nxv2i16.i16( + , , i16, i32); @@ -374,6 +391,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -407,6 +425,7 @@ } declare @llvm.riscv.vslide1down.nxv4i16.i16( + , , i16, i32); @@ -419,6 +438,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -452,6 +472,7 @@ } declare @llvm.riscv.vslide1down.nxv8i16.i16( + , , i16, i32); @@ -464,6 +485,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -497,6 +519,7 @@ } declare @llvm.riscv.vslide1down.nxv16i16.i16( + , , i16, i32); @@ -509,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -542,6 +566,7 @@ } declare @llvm.riscv.vslide1down.nxv32i16.i16( + , , i16, i32); @@ -554,6 +579,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -587,6 +613,7 @@ } declare @llvm.riscv.vslide1down.nxv1i32.i32( + , , i32, i32); @@ -599,6 +626,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -632,6 +660,7 @@ } declare @llvm.riscv.vslide1down.nxv2i32.i32( + , , i32, i32); @@ -644,6 +673,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -677,6 +707,7 @@ } declare @llvm.riscv.vslide1down.nxv4i32.i32( + , , i32, i32); @@ -689,6 +720,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -722,6 +754,7 @@ } declare @llvm.riscv.vslide1down.nxv8i32.i32( + , , i32, i32); @@ -734,6 +767,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -767,6 +801,7 @@ } declare @llvm.riscv.vslide1down.nxv16i32.i32( + , , i32, i32); @@ -779,6 +814,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -812,6 +848,7 @@ } declare @llvm.riscv.vslide1down.nxv1i64.i64( + , , i64, i32); @@ -826,6 +863,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -863,6 +901,7 @@ } declare @llvm.riscv.vslide1down.nxv2i64.i64( + , , i64, i32); @@ -877,6 +916,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -914,6 +954,7 @@ } declare @llvm.riscv.vslide1down.nxv4i64.i64( + , , i64, i32); @@ -928,6 +969,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -965,6 +1007,7 @@ } declare @llvm.riscv.vslide1down.nxv8i64.i64( + , , i64, i32); @@ -979,6 +1022,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1down.nxv1i8.i8( + , , i8, i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vslide1down.nxv2i8.i8( + , , i8, i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vslide1down.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vslide1down.nxv4i8.i8( + , , i8, i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vslide1down.nxv8i8.i8( + , , i8, i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vslide1down.nxv16i8.i8( + , , i8, i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vslide1down.nxv32i8.i8( + , , i8, i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vslide1down.nxv64i8.i8( + , , i8, i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -317,6 +331,7 @@ } declare @llvm.riscv.vslide1down.nxv1i16.i16( + , , i16, i64); @@ -329,6 +344,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -362,6 +378,7 @@ } declare @llvm.riscv.vslide1down.nxv2i16.i16( + , , i16, i64); @@ -374,6 +391,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -407,6 +425,7 @@ } declare @llvm.riscv.vslide1down.nxv4i16.i16( + , , i16, i64); @@ -419,6 +438,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -452,6 +472,7 @@ } declare @llvm.riscv.vslide1down.nxv8i16.i16( + , , i16, i64); @@ -464,6 +485,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -497,6 +519,7 @@ } declare @llvm.riscv.vslide1down.nxv16i16.i16( + , , i16, i64); @@ -509,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -542,6 +566,7 @@ } declare @llvm.riscv.vslide1down.nxv32i16.i16( + , , i16, i64); @@ -554,6 +579,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -587,6 +613,7 @@ } declare @llvm.riscv.vslide1down.nxv1i32.i32( + , , i32, i64); @@ -599,6 +626,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -632,6 +660,7 @@ } declare @llvm.riscv.vslide1down.nxv2i32.i32( + , , i32, i64); @@ -644,6 +673,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -677,6 +707,7 @@ } declare @llvm.riscv.vslide1down.nxv4i32.i32( + , , i32, i64); @@ -689,6 +720,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -722,6 +754,7 @@ } declare @llvm.riscv.vslide1down.nxv8i32.i32( + , , i32, i64); @@ -734,6 +767,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -767,6 +801,7 @@ } declare @llvm.riscv.vslide1down.nxv16i32.i32( + , , i32, i64); @@ -779,6 +814,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -812,6 +848,7 @@ } declare @llvm.riscv.vslide1down.nxv1i64.i64( + , , i64, i64); @@ -824,6 +861,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -857,6 +895,7 @@ } 
declare @llvm.riscv.vslide1down.nxv2i64.i64( + , , i64, i64); @@ -869,6 +908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -902,6 +942,7 @@ } declare @llvm.riscv.vslide1down.nxv4i64.i64( + , , i64, i64); @@ -914,6 +955,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -947,6 +989,7 @@ } declare @llvm.riscv.vslide1down.nxv8i64.i64( + , , i64, i64); @@ -959,6 +1002,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1down.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1up.nxv1i8.i8( + , , i8, i32); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vslide1up.nxv2i8.i8( + , , i8, i32); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vslide1up.nxv4i8.i8( + , , i8, i32); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vslide1up.nxv8i8.i8( + , , i8, i32); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vslide1up.nxv16i8.i8( + , , i8, i32); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vslide1up.nxv32i8.i8( + , , i8, i32); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vslide1up.nxv64i8.i8( + , , i8, i32); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vslide1up.nxv1i16.i16( + , , i16, i32); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vslide1up.nxv2i16.i16( + , , i16, i32); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vslide1up.nxv4i16.i16( + , , i16, i32); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vslide1up.nxv8i16.i16( + , , i16, i32); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vslide1up.nxv16i16.i16( + , , i16, i32); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vslide1up.nxv32i16.i16( + , , i16, i32); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vslide1up.nxv1i32.i32( + , , i32, i32); @@ 
-613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vslide1up.nxv2i32.i32( + , , i32, i32); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vslide1up.nxv4i32.i32( + , , i32, i32); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vslide1up.nxv8i32.i32( + , , i32, i32); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vslide1up.nxv16i32.i32( + , , i32, i32); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vslide1up.nxv1i64.i64( + , , i64, i32); @@ -844,6 +881,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -881,6 +919,7 @@ } declare @llvm.riscv.vslide1up.nxv2i64.i64( + , , i64, i32); @@ -895,6 +934,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -932,6 +972,7 @@ } declare @llvm.riscv.vslide1up.nxv4i64.i64( + , , i64, i32); @@ -946,6 +987,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -983,6 +1025,7 @@ } declare @llvm.riscv.vslide1up.nxv8i64.i64( + , , i64, i32); @@ -997,6 +1040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vslide1up.nxv1i8.i8( + , , i8, i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vslide1up.nxv2i8.i8( + , , i8, i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vslide1up.nxv4i8.i8( + , , i8, i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vslide1up.nxv8i8.i8( + , , i8, i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vslide1up.nxv16i8.i8( + , , i8, i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vslide1up.nxv32i8.i8( + , , i8, i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vslide1up.nxv64i8.i8( + , , i8, i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vslide1up.nxv1i16.i16( + , , i16, i64); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vslide1up.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vslide1up.nxv2i16.i16( + , , i16, i64); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vslide1up.nxv4i16.i16( + , , i16, i64); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vslide1up.nxv8i16.i16( + , , i16, i64); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vslide1up.nxv16i16.i16( + , , i16, i64); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vslide1up.nxv32i16.i16( + , , i16, i64); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vslide1up.nxv1i32.i32( + , , i32, i64); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vslide1up.nxv2i32.i32( + , , i32, i64); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vslide1up.nxv4i32.i32( + , , i32, i64); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vslide1up.nxv8i32.i32( + , , i32, i64); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vslide1up.nxv16i32.i32( + , , i32, i64); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vslide1up.nxv1i64.i64( + , , i64, i64); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vslide1up.nxv2i64.i64( + , , i64, i64); @@ -889,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vslide1up.nxv4i64.i64( + , , i64, i64); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vslide1up.nxv8i64.i64( + , , i64, i64); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsll.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vsll.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } 
declare @llvm.riscv.vsll.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vsll.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vsll.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vsll.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vsll.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vsll.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vsll.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vsll.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vsll.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vsll.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vsll.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vsll.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vsll.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vsll.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vsll.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vsll.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vsll.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vsll.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vsll.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vsll.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vsll.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vsll.nxv1i8( + , , i32, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vsll.nxv2i8( + , , i32, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vsll.nxv4i8( + , , i32, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vsll.nxv8i8( + , , i32, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vsll.nxv16i8( + , , i32, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vsll.nxv32i8( + , , i32, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vsll.nxv64i8( + , , i32, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vsll.nxv1i16( + , , i32, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vsll.nxv2i16( + , , i32, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vsll.nxv4i16( + , , i32, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vsll.nxv8i16( + , , i32, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vsll.nxv16i16( + , , i32, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vsll.nxv32i16( + , , i32, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vsll.nxv1i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vsll.nxv2i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vsll.nxv4i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vsll.nxv8i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vsll.nxv16i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vsll.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vsll.nxv1i64( + , , i32, i32); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vsll.nxv2i64( + , , i32, i32); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vsll.nxv4i64( + , , i32, i32); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vsll.nxv8i64( + , , i32, i32); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1993,6 +2081,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -2008,6 +2097,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i32 1, i32 %1) @@ -2057,6 +2147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -2089,6 +2180,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -2121,6 +2213,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -2153,6 +2246,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -2185,6 +2279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2217,6 +2312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2249,6 +2345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2281,6 +2378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2313,6 +2411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2345,6 +2444,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2377,6 +2477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2409,6 +2510,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2441,6 +2543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2473,6 +2576,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2505,6 +2609,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2537,6 +2642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2569,6 +2675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i32( + undef, %0, i32 9, i32 %1) @@ -2601,6 +2708,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64( + undef, %0, i32 9, i32 %1) @@ -2633,6 +2741,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64( + undef, %0, i32 9, i32 %1) @@ -2665,6 +2774,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64( + undef, %0, i32 9, i32 %1) @@ -2697,6 +2807,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsll.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vsll.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vsll.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vsll.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vsll.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vsll.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vsll.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vsll.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vsll.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vsll.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vsll.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vsll.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vsll.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vsll.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vsll.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vsll.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vsll.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vsll.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i32.nxv16i32( + undef, 
%0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vsll.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vsll.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vsll.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vsll.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vsll.nxv1i8( + , , i64, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vsll.nxv2i8( + , , i64, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vsll.nxv4i8( + , , i64, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vsll.nxv8i8( + , , i64, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vsll.nxv16i8( + , , i64, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vsll.nxv32i8( + , , i64, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vsll.nxv64i8( + , , i64, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vsll.nxv1i16( + , , i64, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vsll.nxv2i16( + , , i64, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vsll.nxv4i16( + , , i64, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vsll.nxv8i16( + , , i64, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vsll.nxv16i16( + , , i64, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vsll.nxv32i16( + , , i64, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vsll.nxv1i32( + , , i64, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vsll.nxv2i32( + , , i64, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsll.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vsll.nxv4i32( + , , i64, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vsll.nxv8i32( + , , i64, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vsll.nxv16i32( + , , i64, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vsll.nxv1i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vsll.nxv2i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vsll.nxv4i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vsll.nxv8i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2008,6 +2097,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i8( + undef, %0, i64 1, i64 %1) @@ -2057,6 +2147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsll.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsll.nxv1i64(
+    <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -2633,6 +2741,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
+    <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 9,
     i64 %1)
@@ -2665,6 +2774,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
+    <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -2697,6 +2807,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[The same pair of insertions (a leading <vscale x N x iM> passthru parameter in each unmasked declare, and <vscale x N x iM> undef as the new first operand of each unmasked call) is repeated for every remaining unmasked vsmul test in this file: the vector-vector forms nxv2i8 through nxv8i64 and the vector-scalar .i8/.i16/.i32/.i64 forms.]
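The change in these test files is purely mechanical: every unmasked declare gains a leading passthru parameter of the result type, and every unmasked call now passes undef for it. A minimal self-contained sketch of the new call shape (the function name, value names, and attributes below are illustrative and are not taken from the test files):

declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru (new)
  <vscale x 1 x i8>,   ; op1
  <vscale x 1 x i8>,   ; op2
  i32)                 ; vl

define <vscale x 1 x i8> @example_vsmul_vv(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i32 %vl) nounwind {
entry:
  ; undef passthru, as used throughout the updated unmasked tests
  %r = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %op1,
    <vscale x 1 x i8> %op2,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}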
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[The same insertions are repeated for every remaining unmasked vsmul test in this file: the vector-vector forms nxv2i8 through nxv8i64 and the vector-scalar .i8/.i16/.i32/.i64 forms, all with an i64 vl operand.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[The same insertions are repeated for every remaining unmasked vsra test in this file: the vector-vector forms nxv2i8 through nxv8i64, the vector-scalar forms @llvm.riscv.vsra.nxvNiM (i32 shift amount and vl), and the corresponding shift-by-9 immediate tests.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[The same insertions are repeated for every remaining unmasked vsra test in this file: the vector-vector forms nxv2i8 through nxv8i64, the vector-scalar forms @llvm.riscv.vsra.nxvNiM (i64 shift amount and vl), and the corresponding shift-by-9 immediate tests.]
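For the shift intrinsics the same leading operand is added to all three unmasked operand kinds exercised by these files: a vector shift amount, a scalar (xlen) shift amount, and the shift-by-9 immediate tests. A minimal sketch on the riscv64 side, reusing only the signatures visible in the hunks above (function and value names are illustrative):

declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64)
declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64)

define <vscale x 1 x i8> @example_vsra_forms(<vscale x 1 x i8> %v, <vscale x 1 x i8> %vshift, i64 %xshift, i64 %vl) nounwind {
entry:
  ; vector shift amount: passthru, source, shift vector, vl
  %vv = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8> %v, <vscale x 1 x i8> %vshift, i64 %vl)
  ; scalar (xlen) shift amount
  %vx = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8> %vv, i64 %xshift, i64 %vl)
  ; immediate shift amount, as in the shift-by-9 tests
  %vi = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8> %vx, i64 9, i64 %vl)
  ret <vscale x 1 x i8> %vi
}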
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
[The same insertions are repeated for every remaining unmasked vsrl test in this file: the vector-vector forms nxv2i8 through nxv8i64, the vector-scalar forms @llvm.riscv.vsrl.nxvNiM (i32 shift amount and vl), and the corresponding shift-by-9 immediate tests.]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[The same insertions are repeated for every remaining unmasked vsrl test in this file: the vector-vector forms nxv2i8 through nxv8i64, the vector-scalar forms @llvm.riscv.vsrl.nxvNiM (i64 shift amount and vl), and the corresponding shift-by-9 immediate tests.]
%2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssra.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vssra.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssra.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssra.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssra.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssra.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssra.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssra.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssra.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssra.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssra.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vssra.nxv1i8( + , , i32, i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssra.nxv2i8( + , , i32, i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssra.nxv4i8( + , , i32, i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssra.nxv8i8( + , , i32, i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -995,6 +1039,7 @@ } declare @llvm.riscv.vssra.nxv16i8( + , , i32, i32); @@ -1007,6 +1052,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1040,6 +1086,7 @@ } declare @llvm.riscv.vssra.nxv32i8( + , , i32, i32); @@ -1052,6 +1099,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1085,6 +1133,7 @@ } declare @llvm.riscv.vssra.nxv64i8( + , , i32, i32); @@ -1097,6 +1146,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1130,6 +1180,7 @@ } declare @llvm.riscv.vssra.nxv1i16( + , , i32, i32); @@ -1142,6 +1193,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1175,6 +1227,7 @@ } declare @llvm.riscv.vssra.nxv2i16( + , , i32, i32); @@ -1187,6 +1240,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1220,6 +1274,7 @@ } declare @llvm.riscv.vssra.nxv4i16( + , , i32, i32); @@ -1232,6 +1287,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1265,6 +1321,7 @@ } declare @llvm.riscv.vssra.nxv8i16( + , , i32, i32); @@ -1277,6 +1334,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1310,6 +1368,7 @@ } declare @llvm.riscv.vssra.nxv16i16( + , , i32, i32); @@ -1322,6 +1381,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1355,6 +1415,7 @@ } declare @llvm.riscv.vssra.nxv32i16( + , , i32, i32); @@ -1367,6 +1428,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1400,6 +1462,7 @@ } declare @llvm.riscv.vssra.nxv1i32( + , , i32, i32); @@ -1412,6 +1475,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1445,6 +1509,7 @@ } declare @llvm.riscv.vssra.nxv2i32( + , , i32, i32); @@ -1457,6 +1522,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1490,6 +1556,7 @@ } declare @llvm.riscv.vssra.nxv4i32( + , , i32, i32); @@ -1502,6 +1569,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1535,6 +1603,7 @@ } declare @llvm.riscv.vssra.nxv8i32( + , , i32, i32); @@ -1547,6 +1616,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1580,6 +1650,7 @@ } declare @llvm.riscv.vssra.nxv16i32( + , , i32, i32); @@ -1592,6 +1663,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1625,6 +1697,7 @@ } declare @llvm.riscv.vssra.nxv1i64( + , , i32, i32); @@ -1637,6 +1710,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1670,6 +1744,7 @@ } declare @llvm.riscv.vssra.nxv2i64( + , , i32, i32); @@ -1682,6 +1757,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1715,6 +1791,7 @@ } declare @llvm.riscv.vssra.nxv4i64( + , , i32, i32); @@ -1727,6 +1804,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1760,6 +1838,7 @@ } declare @llvm.riscv.vssra.nxv8i64( + , , i32, i32); @@ -1772,6 +1851,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1812,6 +1892,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -1844,6 +1925,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -1876,6 +1958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -1908,6 +1991,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -1940,6 +2024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -1972,6 +2057,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2004,6 +2090,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2036,6 +2123,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2068,6 +2156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2100,6 +2189,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2132,6 +2222,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2164,6 +2255,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2196,6 +2288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2228,6 +2321,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2260,6 +2354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2292,6 +2387,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2324,6 +2420,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2356,6 +2453,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i32( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vssra.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vssra.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vssra.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vssra.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vssra.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vssra.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vssra.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssra.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vssra.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssra.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssra.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vssra.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssra.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssra.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssra.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssra.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssra.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssra.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssra.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vssra.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssra.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssra.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssra.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vssra.nxv1i8( + , , i64, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vssra.nxv2i8( + , , i64, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vssra.nxv4i8( + , , i64, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vssra.nxv8i8( + , , i64, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vssra.nxv16i8( + , , i64, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vssra.nxv32i8( + , , i64, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vssra.nxv64i8( + , , i64, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare 
@llvm.riscv.vssra.nxv1i16( + , , i64, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vssra.nxv2i16( + , , i64, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vssra.nxv4i16( + , , i64, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vssra.nxv8i16( + , , i64, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vssra.nxv16i16( + , , i64, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vssra.nxv32i16( + , , i64, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vssra.nxv1i32( + , , i64, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vssra.nxv2i32( + , , i64, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vssra.nxv4i32( + , , i64, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vssra.nxv8i32( + , , i64, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vssra.nxv16i32( + , , i64, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vssra.nxv1i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vssra.nxv2i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vssra.nxv4i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vssra.nxv8i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2025,6 +2114,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2057,6 +2147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vssra.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i16( + undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssra.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vssrl.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vssrl.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vssrl.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vssrl.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vssrl.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vssrl.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssrl.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 
+379,7 @@ } declare @llvm.riscv.vssrl.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssrl.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssrl.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssrl.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssrl.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssrl.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssrl.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssrl.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssrl.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssrl.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vssrl.nxv1i8( + , , i32, i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i32 %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssrl.nxv2i8( + , , i32, i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i32 %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssrl.nxv4i8( + , , i32, i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i32 %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssrl.nxv8i8( + , , i32, i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i32 %1, i32 %2) @@ -995,6 +1039,7 @@ } declare @llvm.riscv.vssrl.nxv16i8( + , , i32, i32); @@ -1007,6 +1052,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8( + undef, %0, i32 %1, i32 %2) @@ -1040,6 +1086,7 @@ } declare @llvm.riscv.vssrl.nxv32i8( + , , i32, i32); @@ -1052,6 +1099,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8( + undef, %0, i32 %1, i32 %2) @@ -1085,6 +1133,7 @@ } declare @llvm.riscv.vssrl.nxv64i8( + , , i32, i32); @@ -1097,6 +1146,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8( + undef, %0, i32 %1, i32 %2) @@ -1130,6 +1180,7 @@ } declare @llvm.riscv.vssrl.nxv1i16( + , , i32, i32); @@ -1142,6 +1193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16( + undef, %0, i32 %1, i32 %2) @@ -1175,6 +1227,7 @@ } declare @llvm.riscv.vssrl.nxv2i16( + , , i32, i32); @@ -1187,6 +1240,7 @@ ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vssrl.nxv2i16( + undef, %0, i32 %1, i32 %2) @@ -1220,6 +1274,7 @@ } declare @llvm.riscv.vssrl.nxv4i16( + , , i32, i32); @@ -1232,6 +1287,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16( + undef, %0, i32 %1, i32 %2) @@ -1265,6 +1321,7 @@ } declare @llvm.riscv.vssrl.nxv8i16( + , , i32, i32); @@ -1277,6 +1334,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16( + undef, %0, i32 %1, i32 %2) @@ -1310,6 +1368,7 @@ } declare @llvm.riscv.vssrl.nxv16i16( + , , i32, i32); @@ -1322,6 +1381,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i16( + undef, %0, i32 %1, i32 %2) @@ -1355,6 +1415,7 @@ } declare @llvm.riscv.vssrl.nxv32i16( + , , i32, i32); @@ -1367,6 +1428,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i16( + undef, %0, i32 %1, i32 %2) @@ -1400,6 +1462,7 @@ } declare @llvm.riscv.vssrl.nxv1i32( + , , i32, i32); @@ -1412,6 +1475,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32( + undef, %0, i32 %1, i32 %2) @@ -1445,6 +1509,7 @@ } declare @llvm.riscv.vssrl.nxv2i32( + , , i32, i32); @@ -1457,6 +1522,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32( + undef, %0, i32 %1, i32 %2) @@ -1490,6 +1556,7 @@ } declare @llvm.riscv.vssrl.nxv4i32( + , , i32, i32); @@ -1502,6 +1569,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32( + undef, %0, i32 %1, i32 %2) @@ -1535,6 +1603,7 @@ } declare @llvm.riscv.vssrl.nxv8i32( + , , i32, i32); @@ -1547,6 +1616,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32( + undef, %0, i32 %1, i32 %2) @@ -1580,6 +1650,7 @@ } declare @llvm.riscv.vssrl.nxv16i32( + , , i32, i32); @@ -1592,6 +1663,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i32( + undef, %0, i32 %1, i32 %2) @@ -1625,6 +1697,7 @@ } declare @llvm.riscv.vssrl.nxv1i64( + , , i32, i32); @@ -1637,6 +1710,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i64( + undef, %0, i32 %1, i32 %2) @@ -1670,6 +1744,7 @@ } declare @llvm.riscv.vssrl.nxv2i64( + , , i32, i32); @@ -1682,6 +1757,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i64( + undef, %0, i32 %1, i32 %2) @@ -1715,6 +1791,7 @@ } declare @llvm.riscv.vssrl.nxv4i64( + , , i32, i32); @@ -1727,6 +1804,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i64( + undef, %0, i32 %1, i32 %2) @@ -1760,6 +1838,7 @@ } declare @llvm.riscv.vssrl.nxv8i64( + , , i32, i32); @@ -1772,6 +1851,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i64( + undef, %0, i32 %1, i32 %2) @@ -1812,6 +1892,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i32 9, i32 %1) @@ -1844,6 +1925,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i32 9, i32 %1) @@ -1876,6 +1958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i32 9, i32 %1) @@ -1908,6 +1991,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i32 9, i32 %1) @@ -1940,6 +2024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8( + undef, %0, i32 9, i32 %1) @@ -1972,6 +2057,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8( + undef, %0, i32 9, i32 %1) @@ -2004,6 +2090,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8( + undef, %0, i32 9, i32 %1) @@ -2036,6 +2123,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16( + undef, %0, i32 9, i32 %1) @@ -2068,6 +2156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i16( + undef, %0, i32 9, i32 %1) @@ -2100,6 +2189,7 @@ 
; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16( + undef, %0, i32 9, i32 %1) @@ -2132,6 +2222,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16( + undef, %0, i32 9, i32 %1) @@ -2164,6 +2255,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i16( + undef, %0, i32 9, i32 %1) @@ -2196,6 +2288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i16( + undef, %0, i32 9, i32 %1) @@ -2228,6 +2321,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32( + undef, %0, i32 9, i32 %1) @@ -2260,6 +2354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32( + undef, %0, i32 9, i32 %1) @@ -2292,6 +2387,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32( + undef, %0, i32 9, i32 %1) @@ -2324,6 +2420,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32( + undef, %0, i32 9, i32 %1) @@ -2356,6 +2453,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i32( + undef, %0, i32 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vssrl.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vssrl.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vssrl.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vssrl.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vssrl.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vssrl.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vssrl.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssrl.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vssrl.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssrl.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssrl.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssrl.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i16.nxv16i16( + 
undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssrl.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssrl.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssrl.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssrl.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssrl.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssrl.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vssrl.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssrl.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssrl.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssrl.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vssrl.nxv1i8( + , , i64, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i64 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vssrl.nxv2i8( + , , i64, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i64 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vssrl.nxv4i8( + , , i64, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i64 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vssrl.nxv8i8( + , , i64, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i64 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vssrl.nxv16i8( + , , i64, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8( + undef, %0, i64 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vssrl.nxv32i8( + , , i64, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8( + undef, %0, i64 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vssrl.nxv64i8( + , , i64, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8( + undef, %0, i64 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vssrl.nxv1i16( + , , i64, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16( + undef, %0, i64 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vssrl.nxv2i16( + , , i64, i64); @@ 
-1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i16( + undef, %0, i64 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vssrl.nxv4i16( + , , i64, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16( + undef, %0, i64 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vssrl.nxv8i16( + , , i64, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16( + undef, %0, i64 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vssrl.nxv16i16( + , , i64, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i16( + undef, %0, i64 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vssrl.nxv32i16( + , , i64, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i16( + undef, %0, i64 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vssrl.nxv1i32( + , , i64, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32( + undef, %0, i64 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vssrl.nxv2i32( + , , i64, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32( + undef, %0, i64 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vssrl.nxv4i32( + , , i64, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32( + undef, %0, i64 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vssrl.nxv8i32( + , , i64, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32( + undef, %0, i64 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vssrl.nxv16i32( + , , i64, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i32( + undef, %0, i64 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vssrl.nxv1i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vssrl.nxv2i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vssrl.nxv4i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vssrl.nxv8i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i8( + undef, %0, i64 9, i64 %1) @@ -2025,6 +2114,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i8( + undef, %0, i64 9, i64 %1) @@ -2057,6 +2147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i8( + undef, %0, i64 9, i64 %1) @@ -2089,6 +2180,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i8( + undef, %0, i64 9, i64 %1) @@ -2121,6 +2213,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i8( + undef, %0, i64 9, i64 %1) @@ -2153,6 +2246,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i8( + undef, %0, i64 9, i64 %1) @@ -2185,6 +2279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv64i8( + undef, %0, i64 9, i64 %1) @@ -2217,6 +2312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i16( + undef, %0, i64 9, i64 %1) @@ -2249,6 +2345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i16( + 
undef, %0, i64 9, i64 %1) @@ -2281,6 +2378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i16( + undef, %0, i64 9, i64 %1) @@ -2313,6 +2411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i16( + undef, %0, i64 9, i64 %1) @@ -2345,6 +2444,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i16( + undef, %0, i64 9, i64 %1) @@ -2377,6 +2477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv32i16( + undef, %0, i64 9, i64 %1) @@ -2409,6 +2510,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i32( + undef, %0, i64 9, i64 %1) @@ -2441,6 +2543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i32( + undef, %0, i64 9, i64 %1) @@ -2473,6 +2576,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i32( + undef, %0, i64 9, i64 %1) @@ -2505,6 +2609,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i32( + undef, %0, i64 9, i64 %1) @@ -2537,6 +2642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv16i32( + undef, %0, i64 9, i64 %1) @@ -2569,6 +2675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv1i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv2i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv4i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssrl.nxv8i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vssub.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vssub.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vssub.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vssub.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vssub.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vssub.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vssub.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssub.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vssub.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssub.nxv4i16.nxv4i16( + , 
, , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssub.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssub.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssub.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssub.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssub.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssub.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssub.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssub.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vssub.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssub.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssub.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssub.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vssub.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vssub.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vssub.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vssub.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vssub.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vssub.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vssub.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vssub.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vssub.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vssub.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vssub.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vssub.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vssub.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vssub.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vssub.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vssub.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vssub.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vssub.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vssub.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vssub.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vssub.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vssub.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vssub.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare 
@llvm.riscv.vssub.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vssub.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vssub.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vssub.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vssub.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vssub.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vssub.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssub.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vssub.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssub.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssub.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssub.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssub.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssub.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssub.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssub.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssub.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssub.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vssub.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssub.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssub.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssub.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vssub.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vssub.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vssub.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vssub.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vssub.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vssub.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vssub.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vssub.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vssub.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vssub.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vssub.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vssub.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vssub.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vssub.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vssub.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vssub.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vssub.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vssub.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vssub.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vssub.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vssub.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vssub.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vssub.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssub.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vssubu.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vssubu.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vssubu.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vssubu.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vssubu.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vssubu.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssubu.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vssubu.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssubu.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 
+439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssubu.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssubu.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssubu.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssubu.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssubu.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssubu.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssubu.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssubu.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vssubu.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssubu.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssubu.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssubu.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vssubu.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vssubu.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vssubu.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vssubu.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vssubu.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vssubu.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vssubu.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vssubu.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vssubu.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vssubu.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vssubu.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vssubu.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vssubu.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vssubu.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vssubu.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vssubu.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vssubu.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vssubu.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vssubu.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vssubu.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vssubu.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vssubu.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vssubu.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s 
| FileCheck %s declare @llvm.riscv.vssubu.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vssubu.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vssubu.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vssubu.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vssubu.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vssubu.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vssubu.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vssubu.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vssubu.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vssubu.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vssubu.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vssubu.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vssubu.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vssubu.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vssubu.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vssubu.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vssubu.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vssubu.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare 
@llvm.riscv.vssubu.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vssubu.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vssubu.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vssubu.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vssubu.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vssubu.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vssubu.nxv4i8.i8( + , , i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vssubu.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vssubu.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vssubu.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vssubu.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vssubu.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vssubu.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vssubu.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vssubu.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vssubu.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vssubu.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vssubu.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare 
@llvm.riscv.vssubu.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vssubu.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vssubu.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vssubu.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vssubu.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vssubu.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vssubu.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vssubu.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vssubu.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsub.nxv1i8.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vsub.nxv2i8.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vsub.nxv4i8.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vsub.nxv8i8.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vsub.nxv16i8.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vsub.nxv32i8.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vsub.nxv64i8.nxv64i8( + , , , i32); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv64i8.nxv64i8( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vsub.nxv1i16.nxv1i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vsub.nxv2i16.nxv2i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -408,6 
+426,7 @@ } declare @llvm.riscv.vsub.nxv4i16.nxv4i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vsub.nxv8i16.nxv8i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vsub.nxv16i16.nxv16i16( + , , , i32); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vsub.nxv32i16.nxv32i16( + , , , i32); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i16.nxv32i16( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vsub.nxv1i32.nxv1i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vsub.nxv2i32.nxv2i32( + , , , i32); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vsub.nxv4i32.nxv4i32( + , , , i32); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vsub.nxv8i32.nxv8i32( + , , , i32); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vsub.nxv16i32.nxv16i32( + , , , i32); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i32.nxv16i32( + undef, %0, %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vsub.nxv1i64.nxv1i64( + , , , i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i64.nxv1i64( + undef, %0, %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vsub.nxv2i64.nxv2i64( + , , , i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i64.nxv2i64( + undef, %0, %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vsub.nxv4i64.nxv4i64( + , , , i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i64.nxv4i64( + undef, %0, %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vsub.nxv8i64.nxv8i64( + , , , i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i64.nxv8i64( + undef, %0, %1, i32 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vsub.nxv1i8.i8( + , , i8, i32); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vsub.nxv2i8.i8( + , , i8, i32); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vsub.nxv4i8.i8( + , , i8, i32); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vsub.nxv8i8.i8( + , , i8, i32); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vsub.nxv16i8.i8( + , , i8, i32); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vsub.nxv32i8.i8( + , , i8, i32); @@ -1233,6 +1288,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vsub.nxv64i8.i8( + , , i8, i32); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv64i8.i8( + undef, %0, i8 %1, i32 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vsub.nxv1i16.i16( + , , i16, i32); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vsub.nxv2i16.i16( + , , i16, i32); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vsub.nxv4i16.i16( + , , i16, i32); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vsub.nxv8i16.i16( + , , i16, i32); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vsub.nxv16i16.i16( + , , i16, i32); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vsub.nxv32i16.i16( + , , i16, i32); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vsub.nxv1i32.i32( + , , i32, i32); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vsub.nxv2i32.i32( + , , i32, i32); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vsub.nxv4i32.i32( + , , i32, i32); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vsub.nxv8i32.i32( + , , i32, i32); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vsub.nxv16i32.i32( + , , i32, i32); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vsub.nxv1i64.i64( + , , i64, i32); @@ -1824,6 +1905,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1863,6 +1945,7 @@ } declare @llvm.riscv.vsub.nxv2i64.i64( + , , i64, i32); @@ -1881,6 +1964,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1920,6 +2004,7 @@ } declare @llvm.riscv.vsub.nxv4i64.i64( + , , i64, i32); @@ -1938,6 +2023,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i64.i64( + undef, %0, i64 %1, i32 %2) @@ -1977,6 +2063,7 @@ } declare @llvm.riscv.vsub.nxv8i64.i64( + , , i64, i32); @@ -1995,6 +2082,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i64.i64( + undef, %0, i64 %1, i32 %2) @@ -2041,6 +2129,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i8.i8( + undef, %0, i8 9, i32 %1) @@ -2073,6 +2162,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i8.i8( + undef, %0, i8 9, i32 %1) @@ -2105,6 +2195,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i8.i8( + undef, %0, i8 9, i32 %1) @@ 
-2137,6 +2228,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i8.i8( + undef, %0, i8 9, i32 %1) @@ -2169,6 +2261,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i8.i8( + undef, %0, i8 9, i32 %1) @@ -2201,6 +2294,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i8.i8( + undef, %0, i8 9, i32 %1) @@ -2233,6 +2327,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv64i8.i8( + undef, %0, i8 -9, i32 %1) @@ -2265,6 +2360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i16.i16( + undef, %0, i16 9, i32 %1) @@ -2297,6 +2393,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i16.i16( + undef, %0, i16 9, i32 %1) @@ -2329,6 +2426,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i16.i16( + undef, %0, i16 9, i32 %1) @@ -2361,6 +2459,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i16.i16( + undef, %0, i16 9, i32 %1) @@ -2393,6 +2492,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i16.i16( + undef, %0, i16 9, i32 %1) @@ -2425,6 +2525,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i16.i16( + undef, %0, i16 9, i32 %1) @@ -2457,6 +2558,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i32.i32( + undef, %0, i32 9, i32 %1) @@ -2489,6 +2591,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i32.i32( + undef, %0, i32 9, i32 %1) @@ -2521,6 +2624,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i32.i32( + undef, %0, i32 9, i32 %1) @@ -2553,6 +2657,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i32.i32( + undef, %0, i32 9, i32 %1) @@ -2585,6 +2690,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i32.i32( + undef, %0, i32 9, i32 %1) @@ -2617,6 +2723,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i64.i64( + undef, %0, i64 9, i32 %1) @@ -2649,6 +2756,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i64.i64( + undef, %0, i64 9, i32 %1) @@ -2681,6 +2789,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i64.i64( + undef, %0, i64 9, i32 %1) @@ -2713,6 +2822,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i64.i64( + undef, %0, i64 9, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vsub.nxv1i8.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vsub.nxv2i8.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vsub.nxv4i8.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vsub.nxv8i8.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vsub.nxv16i8.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vsub.nxv32i8.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i8.nxv32i8( 
+ undef, %0, %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vsub.nxv64i8.nxv64i8( + , , , i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv64i8.nxv64i8( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vsub.nxv1i16.nxv1i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vsub.nxv2i16.nxv2i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vsub.nxv4i16.nxv4i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vsub.nxv8i16.nxv8i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -498,6 +520,7 @@ } declare @llvm.riscv.vsub.nxv16i16.nxv16i16( + , , , i64); @@ -510,6 +533,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -543,6 +567,7 @@ } declare @llvm.riscv.vsub.nxv32i16.nxv32i16( + , , , i64); @@ -555,6 +580,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i16.nxv32i16( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vsub.nxv1i32.nxv1i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vsub.nxv2i32.nxv2i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -679,6 +709,7 @@ } declare @llvm.riscv.vsub.nxv4i32.nxv4i32( + , , , i64); @@ -691,6 +722,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -724,6 +756,7 @@ } declare @llvm.riscv.vsub.nxv8i32.nxv8i32( + , , , i64); @@ -736,6 +769,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -769,6 +803,7 @@ } declare @llvm.riscv.vsub.nxv16i32.nxv16i32( + , , , i64); @@ -781,6 +816,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i32.nxv16i32( + undef, %0, %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vsub.nxv1i64.nxv1i64( + , , , i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i64.nxv1i64( + undef, %0, %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vsub.nxv2i64.nxv2i64( + , , , i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i64.nxv2i64( + undef, %0, %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vsub.nxv4i64.nxv4i64( + , , , i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i64.nxv4i64( + undef, %0, %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vsub.nxv8i64.nxv8i64( + , , , i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i64.nxv8i64( + undef, %0, %1, i64 %2) @@ -996,6 +1040,7 @@ } declare @llvm.riscv.vsub.nxv1i8.i8( + , , i8, i64); @@ -1008,6 +1053,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1041,6 +1087,7 @@ } declare @llvm.riscv.vsub.nxv2i8.i8( + , , i8, i64); @@ -1053,6 +1100,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1086,6 +1134,7 @@ } declare @llvm.riscv.vsub.nxv4i8.i8( + , , 
i8, i64); @@ -1098,6 +1147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1131,6 +1181,7 @@ } declare @llvm.riscv.vsub.nxv8i8.i8( + , , i8, i64); @@ -1143,6 +1194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1176,6 +1228,7 @@ } declare @llvm.riscv.vsub.nxv16i8.i8( + , , i8, i64); @@ -1188,6 +1241,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1221,6 +1275,7 @@ } declare @llvm.riscv.vsub.nxv32i8.i8( + , , i8, i64); @@ -1233,6 +1288,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1266,6 +1322,7 @@ } declare @llvm.riscv.vsub.nxv64i8.i8( + , , i8, i64); @@ -1278,6 +1335,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv64i8.i8( + undef, %0, i8 %1, i64 %2) @@ -1311,6 +1369,7 @@ } declare @llvm.riscv.vsub.nxv1i16.i16( + , , i16, i64); @@ -1323,6 +1382,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1356,6 +1416,7 @@ } declare @llvm.riscv.vsub.nxv2i16.i16( + , , i16, i64); @@ -1368,6 +1429,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1401,6 +1463,7 @@ } declare @llvm.riscv.vsub.nxv4i16.i16( + , , i16, i64); @@ -1413,6 +1476,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1446,6 +1510,7 @@ } declare @llvm.riscv.vsub.nxv8i16.i16( + , , i16, i64); @@ -1458,6 +1523,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1491,6 +1557,7 @@ } declare @llvm.riscv.vsub.nxv16i16.i16( + , , i16, i64); @@ -1503,6 +1570,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1536,6 +1604,7 @@ } declare @llvm.riscv.vsub.nxv32i16.i16( + , , i16, i64); @@ -1548,6 +1617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1581,6 +1651,7 @@ } declare @llvm.riscv.vsub.nxv1i32.i32( + , , i32, i64); @@ -1593,6 +1664,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1626,6 +1698,7 @@ } declare @llvm.riscv.vsub.nxv2i32.i32( + , , i32, i64); @@ -1638,6 +1711,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1671,6 +1745,7 @@ } declare @llvm.riscv.vsub.nxv4i32.i32( + , , i32, i64); @@ -1683,6 +1758,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1716,6 +1792,7 @@ } declare @llvm.riscv.vsub.nxv8i32.i32( + , , i32, i64); @@ -1728,6 +1805,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1761,6 +1839,7 @@ } declare @llvm.riscv.vsub.nxv16i32.i32( + , , i32, i64); @@ -1773,6 +1852,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1806,6 +1886,7 @@ } declare @llvm.riscv.vsub.nxv1i64.i64( + , , i64, i64); @@ -1818,6 +1899,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1851,6 +1933,7 @@ } declare @llvm.riscv.vsub.nxv2i64.i64( + , , i64, i64); @@ -1863,6 +1946,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1896,6 +1980,7 @@ } declare @llvm.riscv.vsub.nxv4i64.i64( + , , i64, i64); @@ -1908,6 +1993,7 @@ ; CHECK-NEXT: ret entry: %a 
= call @llvm.riscv.vsub.nxv4i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1941,6 +2027,7 @@ } declare @llvm.riscv.vsub.nxv8i64.i64( + , , i64, i64); @@ -1953,6 +2040,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i64.i64( + undef, %0, i64 %1, i64 %2) @@ -1993,6 +2081,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i8.i8( + undef, %0, i8 9, i64 %1) @@ -2025,6 +2114,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i8.i8( + undef, %0, i8 9, i64 %1) @@ -2057,6 +2147,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i8.i8( + undef, %0, i8 9, i64 %1) @@ -2089,6 +2180,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i8.i8( + undef, %0, i8 9, i64 %1) @@ -2121,6 +2213,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i8.i8( + undef, %0, i8 9, i64 %1) @@ -2153,6 +2246,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i8.i8( + undef, %0, i8 9, i64 %1) @@ -2185,6 +2279,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv64i8.i8( + undef, %0, i8 9, i64 %1) @@ -2217,6 +2312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i16.i16( + undef, %0, i16 9, i64 %1) @@ -2249,6 +2345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i16.i16( + undef, %0, i16 9, i64 %1) @@ -2281,6 +2378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i16.i16( + undef, %0, i16 9, i64 %1) @@ -2313,6 +2411,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i16.i16( + undef, %0, i16 9, i64 %1) @@ -2345,6 +2444,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i16.i16( + undef, %0, i16 9, i64 %1) @@ -2377,6 +2477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv32i16.i16( + undef, %0, i16 9, i64 %1) @@ -2409,6 +2510,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i32.i32( + undef, %0, i32 9, i64 %1) @@ -2441,6 +2543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i32.i32( + undef, %0, i32 9, i64 %1) @@ -2473,6 +2576,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i32.i32( + undef, %0, i32 9, i64 %1) @@ -2505,6 +2609,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i32.i32( + undef, %0, i32 9, i64 %1) @@ -2537,6 +2642,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv16i32.i32( + undef, %0, i32 9, i64 %1) @@ -2569,6 +2675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv1i64.i64( + undef, %0, i64 9, i64 %1) @@ -2601,6 +2708,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv2i64.i64( + undef, %0, i64 9, i64 %1) @@ -2633,6 +2741,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv4i64.i64( + undef, %0, i64 9, i64 %1) @@ -2665,6 +2774,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsub.nxv8i64.i64( + undef, %0, i64 9, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 
+112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ 
; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ 
} declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ } 
declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ } declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ } declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ } declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ } declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ } declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -273,6 +285,7 @@ } declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + , , , i32); @@ -285,6 +298,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: 
    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     i32 %2)
[... the remaining declares and calls in vwadd.w-rv32.ll take the same two-line addition: a leading <vscale x N x iM> passthru type in each declare and a matching undef first operand in each call ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
[... identical passthru additions for every other vwadd.w declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
[... identical passthru additions for every other vwaddu declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
[... identical passthru additions for every other vwaddu declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
[... identical passthru additions for every other vwaddu.w declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
[... identical passthru additions for every other vwaddu.w declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
[... identical passthru additions for every other vwmul declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
[... identical passthru additions for every other vwmul declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
[... identical passthru additions for every other vwmulsu declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
[... identical passthru additions for every other vwmulsu declare and call in this file ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
-968,6 +1010,7 @@ } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , , , 
i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ } declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ } declare 
@llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ } declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ } declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ } declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ } declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , , , i32); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ ; CHECK-NEXT: ret entry: %a 
= call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ } declare 
@llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ } declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ } declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , , i16, i64); @@ -1165,6 +1216,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1198,6 +1250,7 @@ } declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + , , i32, i64); @@ -1211,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1244,6 +1298,7 @@ } declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + , , i32, i64); @@ -1257,6 +1312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1290,6 +1346,7 @@ } 
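; A minimal sketch of how one of these updated vwsub test hunks reads with the scalable-vector
; types (inferred from the intrinsic name) written out in full; the new first operand is the
; passthru, which the non-masked tests pass as undef. The wrapper function name below is
; illustrative only, not a test taken verbatim from this patch.
declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i32,
  i64);

define <vscale x 2 x i64> @sketch_vwsub_vx_nxv2i64(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
    <vscale x 2 x i64> undef,    ; passthru
    <vscale x 2 x i32> %0,
    i32 %1,
    i64 %2)
  ret <vscale x 2 x i64> %a
}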
declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + , , i32, i64); @@ -1303,6 +1360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i64 %2) @@ -1336,6 +1394,7 @@ } declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + , , i32, i64); @@ -1349,6 +1408,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + , , , i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %0, %1, i32 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + , , , i32); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %0, %1, i32 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + , , , i32); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %0, %1, i32 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + , , , i32); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %0, %1, i32 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + , , , i32); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %0, %1, i32 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + , , , i32); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %0, %1, i32 %2) @@ -273,6 +285,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + , , , i32); @@ -285,6 +298,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %0, %1, i32 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + , , , i32); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %0, %1, i32 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + , , , i32); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %0, %1, i32 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + , , , i32); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %0, %1, i32 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + , , , i32); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + undef, %0, %1, i32 %2) @@ -499,6 +521,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + , , , i32); @@ -511,6 +534,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %0, %1, i32 %2) @@ -544,6 +568,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + , , , i32); @@ -556,6 +581,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %0, %1, i32 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + , , , i32); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %0, %1, i32 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + , , , i32); @@ -646,6 +675,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %0, %1, i32 %2) @@ -680,6 +710,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i16.i8( + , , i8, i32); @@ -692,6 +723,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i16.i8( + undef, %0, i8 %1, i32 %2) @@ -725,6 +757,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i16.i8( + , , i8, i32); @@ -737,6 +770,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i16.i8( + undef, %0, i8 %1, i32 %2) @@ -770,6 +804,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i16.i8( + , , i8, i32); @@ -782,6 +817,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i16.i8( + undef, %0, i8 %1, i32 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i16.i8( + , , i8, i32); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i16.i8( + undef, %0, i8 %1, i32 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i16.i8( + , , i8, i32); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.i8( + undef, %0, i8 %1, i32 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vwsub.w.nxv32i16.i8( + , , i8, i32); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.i8( + undef, %0, i8 %1, i32 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i32.i16( + , , i16, i32); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i32.i16( + undef, %0, i16 %1, i32 %2) @@ -995,6 +1039,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i32.i16( + , , i16, i32); @@ -1007,6 +1052,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1040,6 +1086,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i32.i16( + , , i16, i32); @@ -1052,6 +1099,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1085,6 +1133,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i32.i16( + , , i16, i32); @@ -1097,6 +1146,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1130,6 +1180,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i32.i16( + , , i16, i32); @@ -1142,6 +1193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i32.i16( + undef, %0, i16 %1, i32 %2) @@ -1175,6 +1227,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i64.i32( + , , i32, i32); @@ -1187,6 +1240,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1220,6 +1274,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i64.i32( + , , i32, i32); @@ -1232,6 +1287,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1265,6 +1321,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i64.i32( + , , i32, i32); @@ -1277,6 +1334,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1310,6 +1368,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i64.i32( + , , i32, i32); @@ -1322,6 +1381,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i64.i32( + undef, %0, i32 %1, i32 %2) @@ -1873,6 +1933,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %1, %0, i32 %2) @@ -1889,6 +1950,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %1, %0, i32 %2) @@ -1905,6 +1967,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %1, %0, i32 %2) @@ -1921,6 +1984,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %1, %0, i32 %2) @@ -1937,6 +2001,7 
@@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %1, %0, i32 %2) @@ -1953,6 +2018,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %1, %0, i32 %2) @@ -1969,6 +2035,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %1, %0, i32 %2) @@ -1985,6 +2052,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %1, %0, i32 %2) @@ -2001,6 +2069,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %1, %0, i32 %2) @@ -2017,6 +2086,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %1, %0, i32 %2) @@ -2033,6 +2103,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %1, %0, i32 %2) @@ -2049,6 +2120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %1, %0, i32 %2) @@ -2065,6 +2137,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %1, %0, i32 %2) @@ -2081,6 +2154,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %1, %0, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + , , , i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %0, %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + , , , i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %0, %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + , , , i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %0, %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + , , , i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %0, %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + , , , i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %0, %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + , , , i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %0, %1, i64 %2) @@ -273,6 +285,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + , , , i64); @@ -285,6 +298,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %0, %1, i64 %2) @@ -318,6 +332,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + , , , i64); @@ -330,6 +345,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %0, %1, i64 %2) @@ -363,6 +379,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + , , , i64); @@ -375,6 +392,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %0, %1, i64 %2) @@ -408,6 +426,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + , , , i64); @@ -420,6 +439,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %0, %1, i64 %2) @@ -453,6 +473,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + , , , i64); @@ -465,6 +486,7 @@ ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + undef, %0, %1, i64 %2) @@ -499,6 +521,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + , , , i64); @@ -511,6 +534,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %0, %1, i64 %2) @@ -544,6 +568,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + , , , i64); @@ -556,6 +581,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %0, %1, i64 %2) @@ -589,6 +615,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + , , , i64); @@ -601,6 +628,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %0, %1, i64 %2) @@ -634,6 +662,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + , , , i64); @@ -646,6 +675,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %0, %1, i64 %2) @@ -680,6 +710,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i16.i8( + , , i8, i64); @@ -692,6 +723,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i16.i8( + undef, %0, i8 %1, i64 %2) @@ -725,6 +757,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i16.i8( + , , i8, i64); @@ -737,6 +770,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i16.i8( + undef, %0, i8 %1, i64 %2) @@ -770,6 +804,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i16.i8( + , , i8, i64); @@ -782,6 +817,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i16.i8( + undef, %0, i8 %1, i64 %2) @@ -815,6 +851,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i16.i8( + , , i8, i64); @@ -827,6 +864,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i16.i8( + undef, %0, i8 %1, i64 %2) @@ -860,6 +898,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i16.i8( + , , i8, i64); @@ -872,6 +911,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.i8( + undef, %0, i8 %1, i64 %2) @@ -905,6 +945,7 @@ } declare @llvm.riscv.vwsub.w.nxv32i16.i8( + , , i8, i64); @@ -917,6 +958,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.i8( + undef, %0, i8 %1, i64 %2) @@ -950,6 +992,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i32.i16( + , , i16, i64); @@ -962,6 +1005,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i32.i16( + undef, %0, i16 %1, i64 %2) @@ -995,6 +1039,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i32.i16( + , , i16, i64); @@ -1007,6 +1052,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1040,6 +1086,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i32.i16( + , , i16, i64); @@ -1052,6 +1099,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1085,6 +1133,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i32.i16( + , , i16, i64); @@ -1097,6 +1146,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1130,6 +1180,7 @@ } declare @llvm.riscv.vwsub.w.nxv16i32.i16( + , , i16, i64); @@ -1142,6 +1193,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i32.i16( + undef, %0, i16 %1, i64 %2) @@ -1175,6 +1227,7 @@ } declare @llvm.riscv.vwsub.w.nxv1i64.i32( + , , i32, i64); @@ -1187,6 +1240,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1220,6 +1274,7 @@ } declare @llvm.riscv.vwsub.w.nxv2i64.i32( + , , i32, i64); @@ -1232,6 +1287,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1265,6 +1321,7 @@ } declare @llvm.riscv.vwsub.w.nxv4i64.i32( + , , i32, i64); @@ -1277,6 +1334,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vwsub.w.nxv4i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1310,6 +1368,7 @@ } declare @llvm.riscv.vwsub.w.nxv8i64.i32( + , , i32, i64); @@ -1322,6 +1381,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i64.i32( + undef, %0, i32 %1, i64 %2) @@ -1873,6 +1933,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + undef, %1, %0, i64 %2) @@ -1889,6 +1950,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + undef, %1, %0, i64 %2) @@ -1905,6 +1967,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + undef, %1, %0, i64 %2) @@ -1921,6 +1984,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + undef, %1, %0, i64 %2) @@ -1937,6 +2001,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + undef, %1, %0, i64 %2) @@ -1953,6 +2018,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + undef, %1, %0, i64 %2) @@ -1969,6 +2035,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + undef, %1, %0, i64 %2) @@ -1985,6 +2052,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + undef, %1, %0, i64 %2) @@ -2001,6 +2069,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + undef, %1, %0, i64 %2) @@ -2017,6 +2086,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + undef, %1, %0, i64 %2) @@ -2033,6 +2103,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + undef, %1, %0, i64 %2) @@ -2049,6 +2120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + undef, %1, %0, i64 %2) @@ -2065,6 +2137,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + undef, %1, %0, i64 %2) @@ -2081,6 +2154,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + undef, %1, %0, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , , , i32); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i32 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + , , , i32); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i32 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + , , , i32); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i32 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + , , , i32); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i32 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + , , , i32); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i32 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + , , , i32); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i32 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + 
, , , i32); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i32 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + , , , i32); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i32 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + , , , i32); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i32 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + , , , i32); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i32 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + , , , i32); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i32 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + , , , i32); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i32 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + , , , i32); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i32 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + , , , i32); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i32 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + , , , i32); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i32 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + , , i8, i32); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i32 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + , , i8, i32); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i32 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + , , i8, i32); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i32 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + , , i8, i32); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i32 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + , , i8, i32); @@ -889,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i32 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + , , i8, i32); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i32 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + , , i16, i32); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1014,6 +1058,7 @@ } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + , , i16, i32); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1060,6 +1106,7 @@ } declare 
@llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + , , i16, i32); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + , , i16, i32); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + , , i16, i32); @@ -1165,6 +1216,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + undef, %0, i16 %1, i32 %2) @@ -1198,6 +1250,7 @@ } declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + , , i32, i32); @@ -1211,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1244,6 +1298,7 @@ } declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + , , i32, i32); @@ -1257,6 +1312,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1290,6 +1346,7 @@ } declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + , , i32, i32); @@ -1303,6 +1360,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + undef, %0, i32 %1, i32 %2) @@ -1336,6 +1394,7 @@ } declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + , , i32, i32); @@ -1349,6 +1408,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + undef, %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , , , i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + undef, %0, %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + , , , i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + undef, %0, %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + , , , i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + undef, %0, %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + , , , i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + undef, %0, %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + , , , i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + undef, %0, %1, i64 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + , , , i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + undef, %0, %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + , , , i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + undef, %0, %1, i64 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + , , , i64); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + undef, %0, %1, i64 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + , , , i64); @@ -383,6 +400,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + undef, %0, %1, i64 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + , , , i64); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + undef, %0, %1, i64 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + , , , i64); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + undef, %0, %1, i64 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + , , , i64); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + undef, %0, %1, i64 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + , , , i64); @@ -567,6 +592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + undef, %0, %1, i64 %2) @@ -600,6 +626,7 @@ } declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + , , , i64); @@ -613,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + undef, %0, %1, i64 %2) @@ -646,6 +674,7 @@ } declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + , , , i64); @@ -659,6 +688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + undef, %0, %1, i64 %2) @@ -692,6 +722,7 @@ } declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + , , i8, i64); @@ -705,6 +736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + undef, %0, i8 %1, i64 %2) @@ -738,6 +770,7 @@ } declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + , , i8, i64); @@ -751,6 +784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + undef, %0, i8 %1, i64 %2) @@ -784,6 +818,7 @@ } declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + , , i8, i64); @@ -797,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + undef, %0, i8 %1, i64 %2) @@ -830,6 +866,7 @@ } declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + , , i8, i64); @@ -843,6 +880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + undef, %0, i8 %1, i64 %2) @@ -876,6 +914,7 @@ } declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + , , i8, i64); @@ -889,6 +928,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + undef, %0, i8 %1, i64 %2) @@ -922,6 +962,7 @@ } declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + , , i8, i64); @@ -935,6 +976,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + undef, %0, i8 %1, i64 %2) @@ -968,6 +1010,7 @@ } declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + , , i16, i64); @@ -981,6 +1024,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1014,6 +1058,7 @@ } declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + , , i16, i64); @@ -1027,6 +1072,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1060,6 +1106,7 @@ } declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + , , i16, i64); @@ -1073,6 +1120,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1106,6 +1154,7 @@ } declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + , , i16, i64); @@ -1119,6 +1168,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + undef, %0, i16 %1, i64 %2) @@ -1152,6 +1202,7 @@ } declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + , 
   <vscale x 16 x i16>,
   i16,
   i64);
@@ -1165,6 +1216,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
+    <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
     i64 %2)
@@ -1198,6 +1250,7 @@
 }

 declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
+  <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32,
   i64);
@@ -1211,6 +1264,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
+    <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
     i64 %2)

[... identical passthru-operand updates for the remaining vwsubu vector-scalar test cases (nxv2i64, nxv4i64, nxv8i64) in this file ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i32 %2)

[... the same change is applied to every remaining vwsubu.w declare and unmasked call in this file: the vector-vector cases through nxv8i64.nxv8i32, the vector-scalar cases (i8/i16/i32 second operands), and the operand-swapped calls near the end, each gaining a leading <vscale x N x iM> passthru type in the declare and an undef passthru argument in the call ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     i64 %2)

[... the rv64 file mirrors the rv32 file with an i64 vl argument; every vwsubu.w declare and unmasked call receives the same leading passthru type and undef passthru argument ...]
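For reference, this is roughly what one complete unmasked test case looks like after the change; a minimal sketch, with the function and value names here being illustrative rather than copied from the test file:

declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,   ; new leading passthru operand
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i16> @vwsubu_w_wv_example(<vscale x 1 x i16> %op1, <vscale x 1 x i8> %op2, i32 %vl) nounwind {
entry:
  ; unmasked tests pass undef for the new passthru operand
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %op1,
    <vscale x 1 x i8> %op2,
    i32 %vl)
  ret <vscale x 1 x i16> %a
}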
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -996,6 +1040,7 @@
 }

 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -1008,6 +1053,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
@@ -2041,6 +2129,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)

[... every other vxor declare and unmasked call in this file — vector-vector forms through nxv8i64.nxv8i64, vector-scalar forms (i8/i16/i32/i64 scalars), and the vector-immediate (xor with 9) forms — receives the same leading <vscale x N x iM> passthru type and undef passthru argument ...]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -2665,6 +2774,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i64 %1)

[... the rv64 file mirrors the rv32 file with an i64 vl argument; every vxor declare and unmasked call — vector-vector, vector-scalar, and vector-immediate forms — gains the same leading passthru type and undef passthru argument ...]
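As a hedged illustration of what the new operand is for (this caller is hypothetical and not part of the patch): because the unmasked intrinsics now carry a passthru, a front end can pass a real merge vector instead of undef, for example:

declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @vxor_with_merge(<vscale x 1 x i8> %merge, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) nounwind {
entry:
  ; passing %merge instead of undef is intended to let elements not produced by
  ; the operation come from %merge; undef keeps the existing behaviour of the tests
  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %merge,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    i32 %vl)
  ret <vscale x 1 x i8> %a
}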