diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -173,20 +173,13 @@
 // This builtin has a granted vector length parameter.
 bit HasVL = true;
- // There are several cases for specifying tail policy.
- // 1. Add tail policy argument to masked intrinsics. It may have the maskedoff
- //    argument or not.
- //    * Have the maskedoff argument: (HasPolicy = true, HasMaskedOffOperand = true)
- //      Ex: vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, ta);
- //    * Do not have the maskedoff argument: (HasPolicy = true, HasMaskedOffOperand = false)
- //      Ex: vmacc_vv_i8m1_mt(mask, vd, vs1, vs2, vl, ta);
- // 2. Add dest argument for no mask intrinsics. (TODO)
- //    Ex: vmv_v_x_i8m1_t(dest, src, vl);
- // 3. Always tail agnostic. (HasPolicy = false)
- //    Ex: vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
- // The tail policy argument is located at the last position.
+ // The masked intrinsic IR has a policy operand.
+ // The policy argument is located at the last position.
 bit HasPolicy = true;
+ // The nomask intrinsic IR has a passthru operand.
+ bit HasNoMaskPolicy = false;
+
 // This builtin supports non-masked function overloading api.
 // All masked operations support overloading api.
 bit HasNoMaskedOverloaded = true;
@@ -1583,15 +1576,18 @@
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
+let HasNoMaskPolicy = true in {
 defm vadd : RVVIntBinBuiltinSet;
 defm vsub : RVVIntBinBuiltinSet;
 defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
                                  [["vx", "v", "vve"],
                                   ["vx", "Uv", "UvUvUe"]]>;
+}
 defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
 // 12.2. Vector Widening Integer Add/Subtract
 // Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
+let HasNoMaskPolicy = true in {
 defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
 defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
 // Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
@@ -1603,6 +1599,7 @@
 // Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
 defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
 defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
+}
 defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
                                           [["Uw", "UwUv"]]>;
 defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
@@ -1633,12 +1630,15 @@
 }
 // 12.5. Vector Bitwise Logical Instructions
+let HasNoMaskPolicy = true in {
 defm vand : RVVIntBinBuiltinSet;
 defm vxor : RVVIntBinBuiltinSet;
 defm vor : RVVIntBinBuiltinSet;
+}
 defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
 // 12.6. Vector Single-Width Bit Shift Instructions
+let HasNoMaskPolicy = true in {
 defm vsll : RVVShiftBuiltinSet;
 defm vsrl : RVVUnsignedShiftBuiltinSet;
 defm vsra : RVVSignedShiftBuiltinSet;
@@ -1646,6 +1646,7 @@
 // 12.7. Vector Narrowing Integer Right Shift Instructions
 defm vnsrl : RVVUnsignedNShiftBuiltinSet;
 defm vnsra : RVVSignedNShiftBuiltinSet;
+}
 defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
                                          [["v", "vw"], ["Uv", "UvUw"]]>;
@@ -1665,6 +1666,7 @@
 }
 // 12.9. Vector Integer Min/Max Instructions
+let HasNoMaskPolicy = true in {
 defm vminu : RVVUnsignedBinBuiltinSet;
 defm vmin : RVVSignedBinBuiltinSet;
 defm vmaxu : RVVUnsignedBinBuiltinSet;
@@ -1683,6 +1685,7 @@
 defm vdiv : RVVSignedBinBuiltinSet;
 defm vremu : RVVUnsignedBinBuiltinSet;
 defm vrem : RVVSignedBinBuiltinSet;
+}
 // 12.12.
Vector Widening Integer Multiply Instructions let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { @@ -1751,6 +1754,7 @@ // 13. Vector Fixed-Point Arithmetic Instructions // 13.1. Vector Single-Width Saturating Add and Subtract +let HasNoMaskPolicy = true in { defm vsaddu : RVVUnsignedBinBuiltinSet; defm vsadd : RVVSignedBinBuiltinSet; defm vssubu : RVVUnsignedBinBuiltinSet; @@ -1798,6 +1802,7 @@ [["vv", "w", "wvv"], ["vf", "w", "wve"]]>; } +} // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions let HasPolicy = false in { @@ -1827,8 +1832,10 @@ def vfrec7 : RVVFloatingUnaryVVBuiltin; // 14.11. Vector Floating-Point MIN/MAX Instructions +let HasNoMaskPolicy = true in { defm vfmin : RVVFloatingBinBuiltinSet; defm vfmax : RVVFloatingBinBuiltinSet; +} // 14.12. Vector Floating-Point Sign-Injection Instructions defm vfsgnj : RVVFloatingBinBuiltinSet; @@ -2007,6 +2014,7 @@ } // 17.3.3. Vector Slide1up Instructions +let HasNoMaskPolicy = true in { defm vslide1up : RVVSlideOneBuiltinSet; defm vfslide1up : RVVFloatingBinVFBuiltinSet; @@ -2029,6 +2037,7 @@ [["vx", "Uv", "UvUvz"]]>; defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil", [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>; +} // 17.5. Vector Compress Instruction let HasMask = false, HasPolicy = false, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -574,7 +574,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -583,7 +583,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -592,7 +592,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -601,7 +601,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -610,7 +610,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -637,7 +637,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -646,7 +646,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -655,7 +655,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -664,7 +664,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -673,7 +673,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -682,7 +682,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { 
@@ -691,7 +691,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -709,7 +709,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -718,7 +718,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -727,7 +727,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -736,7 +736,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -745,7 +745,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -754,7 +754,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -763,7 +763,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -772,7 +772,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -781,7 +781,7 @@ // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -790,7 +790,7 @@ // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( undef, [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c @@ -6,7 +6,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -15,7 +15,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -24,7 +24,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -33,7 +33,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,7 +51,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -60,7 +60,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -96,7 +96,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t 
op2, size_t vl) { @@ -105,7 +105,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -114,7 +114,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -123,7 +123,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -132,7 +132,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -141,7 +141,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -150,7 +150,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -177,7 +177,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -186,7 +186,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -195,7 +195,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -204,7 +204,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -222,7 +222,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -231,7 +231,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -240,7 +240,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -267,7 +267,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -276,7 +276,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -294,7 +294,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -312,7 +312,7 @@ // 
CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -321,7 +321,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -330,7 +330,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -366,7 +366,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -375,7 +375,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -384,7 +384,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -393,7 +393,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -402,7 +402,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -411,7 +411,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -420,7 +420,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -447,7 +447,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -465,7 +465,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -474,7 +474,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -483,7 +483,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -492,7 +492,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -501,7 +501,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -510,7 +510,7 @@ // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: 
@test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -546,7 +546,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -594,7 +594,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -603,7 +603,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -613,7 +613,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -622,7 +622,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -631,7 +631,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -640,7 +640,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -667,7 +667,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -676,7 +676,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -685,7 +685,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -694,7 +694,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -703,7 +703,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -712,7 +712,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -721,7 +721,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: 
@test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -739,7 +739,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -757,7 +757,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( undef, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -785,7 +785,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -795,7 +795,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( undef, [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -804,7 +804,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -814,7 +814,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -823,7 +823,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -832,7 +832,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -841,7 +841,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -850,7 +850,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -877,7 +877,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -886,7 +886,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -895,7 +895,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -904,7 +904,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -913,7 +913,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -922,7 +922,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -931,7 +931,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( undef, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -949,7 +949,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -977,7 +977,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -986,7 +986,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -995,7 +995,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1013,7 +1013,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1022,7 +1022,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1031,7 +1031,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, 
uint32_t op2, size_t vl) { @@ -1076,7 +1076,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( undef, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,7 +1085,7 @@ // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( undef, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -161,6 +161,7 @@ bool IsMask; bool HasVL; bool HasPolicy; + bool HasNoMaskPolicy; bool HasNoMaskedOverloaded; bool HasAutoDef; // There is automiatic definition in header std::string ManualCodegen; @@ -176,8 +177,8 @@ RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, StringRef MangledSuffix, StringRef IRName, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, - bool HasNoMaskedOverloaded, bool HasAutoDef, - StringRef ManualCodegen, const RVVTypes &Types, + bool HasNoMaskPolicy, bool HasNoMaskedOverloaded, + bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector &IntrinsicTypes, const std::vector &RequiredExtensions, unsigned NF); ~RVVIntrinsic() = default; @@ -187,6 +188,7 @@ StringRef getMangledName() const { return MangledName; } bool hasVL() const { return HasVL; } bool hasPolicy() const { return HasPolicy; } + bool hasNoMaskPolicy() const { return HasNoMaskPolicy; } bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; } bool hasManualCodegen() const { return !ManualCodegen.empty(); } bool hasAutoDef() const { return HasAutoDef; } @@ -765,16 +767,15 @@ //===----------------------------------------------------------------------===// // RVVIntrinsic implementation //===----------------------------------------------------------------------===// -RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix, - StringRef NewMangledName, StringRef MangledSuffix, - StringRef IRName, bool IsMask, - bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, - bool HasNoMaskedOverloaded, bool HasAutoDef, - StringRef ManualCodegen, const RVVTypes &OutInTypes, - const std::vector &NewIntrinsicTypes, - const std::vector &RequiredExtensions, - unsigned NF) +RVVIntrinsic::RVVIntrinsic( + StringRef NewName, StringRef Suffix, StringRef NewMangledName, + StringRef MangledSuffix, StringRef IRName, bool IsMask, + bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, bool HasNoMaskPolicy, + bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, + const RVVTypes &OutInTypes, const std::vector &NewIntrinsicTypes, + const std::vector &RequiredExtensions, unsigned NF) : IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy), + HasNoMaskPolicy(HasNoMaskPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) { @@ -817,7 +818,7 @@ 
// IntrinsicTypes is nonmasked version index. Need to update it // if there is maskedoff operand (It is always in first operand). IntrinsicTypes = NewIntrinsicTypes; - if (IsMask && HasMaskedOffOperand) { + if ((IsMask && HasMaskedOffOperand) || (!IsMask && HasNoMaskPolicy)) { for (auto &I : IntrinsicTypes) { if (I >= 0) I += NF; @@ -854,6 +855,9 @@ } else { OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n"; } + } else if (hasNoMaskPolicy()) { + OS << " Ops.push_back(llvm::UndefValue::get(ResultType));\n"; + OS << " std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n"; } OS << " IntrinsicTypes = {"; @@ -1101,6 +1105,8 @@ PrintFatalError("Builtin with same name has different HasPolicy"); else if (P.first->second->hasPolicy() != Def->hasPolicy()) PrintFatalError("Builtin with same name has different HasPolicy"); + else if (P.first->second->hasNoMaskPolicy() != Def->hasNoMaskPolicy()) + PrintFatalError("Builtin with same name has different HasNoMaskPolicy"); else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes()) PrintFatalError("Builtin with same name has different IntrinsicTypes"); } @@ -1148,6 +1154,7 @@ bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand"); bool HasVL = R->getValueAsBit("HasVL"); bool HasPolicy = R->getValueAsBit("HasPolicy"); + bool HasNoMaskPolicy = R->getValueAsBit("HasNoMaskPolicy"); bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded"); std::vector Log2LMULList = R->getValueAsListOfInts("Log2LMUL"); StringRef ManualCodegen = R->getValueAsString("ManualCodegen"); @@ -1222,8 +1229,8 @@ Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRName, /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy, - HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(), - IntrinsicTypes, RequiredExtensions, NF)); + HasNoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, + Types.getValue(), IntrinsicTypes, RequiredExtensions, NF)); if (HasMask) { // Create a mask intrinsic Optional MaskTypes = @@ -1231,8 +1238,9 @@ Out.push_back(std::make_unique( Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy, - HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, - MaskTypes.getValue(), IntrinsicTypes, RequiredExtensions, NF)); + HasNoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef, + ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes, + RequiredExtensions, NF)); } } // end for Log2LMULList } // end for TypeRange diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -356,12 +356,13 @@ let VLOperand = 2; } // For destination vector type is the same as first and second source vector. - // Input: (vector_in, int_vector_in, vl) + // Input: (passthru, vector_in, int_vector_in, vl) class RISCVRGatherVVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. 
// Input: (vector_in, vector_in, int_vector_in, vl, ta) @@ -373,13 +374,14 @@ [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 4; } - // Input: (vector_in, int16_vector_in, vl) + // Input: (passthru, vector_in, int16_vector_in, vl) class RISCVRGatherEI16VVNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, int16_vector_in, vl, ta) @@ -394,12 +396,13 @@ } // For destination vector type is the same as first source vector, and the // second operand is XLen. - // Input: (vector_in, xlen_in, vl) + // Input: (passthru, vector_in, xlen_in, vl) class RISCVGatherVXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, + LLVMMatchType<1>], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Second operand is XLen. @@ -413,13 +416,14 @@ let VLOperand = 4; } // For destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -434,12 +438,13 @@ } // For destination vector type is the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. @@ -453,13 +458,14 @@ let VLOperand = 4; } // For destination vector type is NOT the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABXNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -474,12 +480,13 @@ } // For destination vector type is NOT the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. 
- // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For destination vector type is NOT the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. @@ -568,13 +575,14 @@ } // For Saturating binary operations. // The destination vector type is the same as first source vector. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAXNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let SplatOperand = 1; - let VLOperand = 2; + let SplatOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -591,12 +599,13 @@ // For Saturating binary operations. // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -613,12 +622,13 @@ // For Saturating binary operations. // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. - // Input: (vector_in, vector_in/scalar_in, vl) + // Input: (passthru, vector_in, vector_in/scalar_in, vl) class RISCVSaturatingBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, + llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { - let VLOperand = 2; + let VLOperand = 3; } // For Saturating binary operations with mask. // The destination vector type is NOT the same as first source vector (with mask). 
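The intrinsic-signature changes above are easiest to see as IR. Here is an illustrative before/after for an unmasked vadd call (a sketch, not part of the patch; the value names %r, %op1, %op2, %vl are made up for illustration, and the scalable-vector types follow from the intrinsic name):

; Before: (vector_in, vector_in/scalar_in, vl)
%r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
         <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl)

; After: (passthru, vector_in, vector_in/scalar_in, vl). Passing undef as
; the passthru keeps the old tail-agnostic behavior; passing a live vector
; requests its elements in the tail (tail undisturbed).
%r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
         <vscale x 1 x i8> undef,
         <vscale x 1 x i8> %op1, <vscale x 1 x i8> %op2, i64 %vl)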
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -982,6 +982,24 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoTiedBinaryNoMask : @@ -1000,6 +1018,25 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoTiedBinaryNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasMergeOp = 0; // Merge is also rs2. + let HasDummyMask = 1; + let ForceTailAgnostic = 1; + let isConvertibleToThreeAddress = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoIStoreNoMask LMUL, bit Ordered>: Pseudo<(outs), @@ -1611,6 +1648,8 @@ let VLMul = MInfo.value in { def "_" # MInfo.MX : VPseudoBinaryNoMask; + def "_" # MInfo.MX # "_TU" : VPseudoBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA; } @@ -1639,6 +1678,8 @@ let VLMul = lmul.value in { def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask; + def "_" # lmul.MX # "_" # emul.MX # "_TU": VPseudoBinaryNoMaskTU; def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA; } @@ -1651,6 +1692,8 @@ let VLMul = MInfo.value in { def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask; + def "_" # MInfo.MX # "_TIED_TU": VPseudoTiedBinaryNoMaskTU; def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask; } @@ -2722,14 +2765,14 @@ (mask_type VR:$rs2), GPR:$vl, sew)>; -class VPatBinaryNoMask : +class VPatBinaryM : Pat<(result_type (!cast(intrinsic_name) (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), @@ -2739,6 +2782,44 @@ (op2_type op2_kind:$rs2), GPR:$vl, sew)>; +class VPatBinaryNoMaskTA : + Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + +class VPatBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TU") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + // Same as above but source operands are swapped. 
class VPatBinaryNoMaskSwapped : Pat<(result_type (!cast(intrinsic_name) + (result_type (undef)), (result_type result_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), @@ -2840,6 +2922,23 @@ (op2_type op2_kind:$rs2), GPR:$vl, sew)>; +class VPatTiedBinaryNoMaskTU : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + VLOpFrag)), + (!cast(inst#"_TIED_TU") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + class VPatTiedBinaryMask; } -multiclass VPatBinary { - def : VPatBinaryNoMask; + def : VPatBinaryM; def : VPatBinaryMask; @@ -3053,8 +3152,10 @@ VReg op1_reg_class, DAGOperand op2_kind> { - def : VPatBinaryNoMask; + def : VPatBinaryNoMaskTA; + def : VPatBinaryNoMaskTU; def : VPatBinaryMaskTA; @@ -3226,9 +3327,9 @@ multiclass VPatBinaryM_MM { foreach mti = AllMasks in - def : VPatBinaryNoMask; + def : VPatBinaryM; } multiclass VPatBinaryW_VV; + def : VPatTiedBinaryNoMaskTU; let AddedComplexity = 1 in def : VPatTiedBinaryMask vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinarySwappedM_VV vtilist> { foreach vti = vtilist in { defvar kind = "V"#vti.ScalarSuffix; - defm : VPatBinary; + defm : VPatBinaryM; } } multiclass VPatBinaryM_VI vtilist> { foreach vti = vtilist in - defm : VPatBinary; + defm : VPatBinaryM; } multiclass VPatBinaryV_VV_VX_VI("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1, @@ -3953,7 +4058,8 @@ (XLenVT timm:$policy))>; // Match VSUB with a small immediate to vadd.vi by negating the immediate. - def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1), + def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)), + (vti.Vector vti.RegClass:$rs1), (vti.Scalar simm5_plus1:$rs2), VLOpFrag)), (!cast("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1, diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv32.ll @@ -90,3 +90,47 @@ ret %a } + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + , + i32); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vadd.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , + , + , + i32); + +define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vwadd.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu-rv64.ll @@ -89,3 +89,47 @@ ret %a } + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + , + i64); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -14,6 +15,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -46,6 +48,7 @@
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32);
@@ -58,6 +61,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2)
@@ -90,6 +94,7 @@
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32);
@@ -102,6 +107,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2)
@@ -134,6 +140,7 @@
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32);
@@ -146,6 +153,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2)
@@ -178,6 +186,7 @@
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32);
@@ -190,6 +199,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2)
@@ -222,6 +232,7 @@
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32);
@@ -234,6 +245,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2)
@@ -266,6 +278,7 @@
 }
 
 declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i32);
@@ -278,6 +291,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i32 %2)
@@ -311,6 +325,7 @@
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i32);
@@ -323,6 +338,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 %2)
@@ -355,6 +371,7 @@
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i32);
@@ -367,6 +384,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 %2)
@@ -399,6 +417,7 @@
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i32);
@@ -411,6 +430,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 %2)
@@ -443,6 +463,7 @@
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i32);
@@ -455,6 +476,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 %2)
@@ -487,6 +509,7 @@
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i32);
@@ -499,6 +522,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 %2)
@@ -531,6 +555,7 @@
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i32);
@@ -543,6 +568,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i32 %2)
@@ -576,6 +602,7 @@
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32);
@@ -588,6 +615,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2)
@@ -620,6 +648,7 @@
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32);
@@ -632,6 +661,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2)
@@ -664,6 +694,7 @@
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32);
@@ -676,6 +707,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2)
@@ -708,6 +740,7 @@
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32);
@@ -720,6 +753,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2)
@@ -752,6 +786,7 @@
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32);
@@ -764,6 +799,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2)
@@ -797,6 +833,7 @@
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32);
@@ -809,6 +846,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i32 %2)
@@ -841,6 +879,7 @@
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i32);
@@ -853,6 +892,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i32 %2)
@@ -885,6 +925,7 @@
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i32);
@@ -897,6 +938,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i32 %2)
@@ -929,6 +971,7 @@
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i32);
@@ -941,6 +984,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i32 %2)
@@ -974,6 +1018,7 @@
 }
 
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+  <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -986,6 +1031,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1018,6 +1064,7 @@
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+  <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
   i32);
@@ -1030,6 +1077,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+    <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1062,6 +1110,7 @@
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+  <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
   i32);
@@ -1074,6 +1123,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+    <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1106,6 +1156,7 @@
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+  <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
   i32);
@@ -1118,6 +1169,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1150,6 +1202,7 @@
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+  <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
   i32);
@@ -1162,6 +1215,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+    <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1194,6 +1248,7 @@
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+  <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
   i32);
@@ -1206,6 +1261,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+    <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1238,6 +1294,7 @@
 }
 
 declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+  <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
   i32);
@@ -1250,6 +1307,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+    <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
     i32 %2)
@@ -1282,6 +1340,7 @@
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+  <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
   i32);
@@ -1294,6 +1353,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
     i32 %2)
@@ -1326,6 +1386,7 @@
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+  <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
   i32);
@@ -1338,6 +1399,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+    <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
     i32 %2)
@@ -1370,6 +1432,7 @@
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+  <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
   i32);
@@ -1382,6 +1445,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+    <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
     i32 %2)
@@ -1414,6 +1478,7 @@
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+  <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
   i32);
@@ -1426,6 +1491,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+    <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
     i32 %2)
@@ -1458,6 +1524,7 @@
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+  <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
   i32);
@@ -1470,6 +1537,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+    <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
     i32 %2)
@@ -1502,6 +1570,7 @@
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+  <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
   i32);
@@ -1514,6 +1583,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+    <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
     i32 %2)
@@ -1546,6 +1616,7 @@
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+  <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
   i32);
@@ -1558,6 +1629,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+    <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1590,6 +1662,7 @@
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+  <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
   i32);
@@ -1602,6 +1675,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1634,6 +1708,7 @@
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+  <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
   i32);
@@ -1646,6 +1721,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1678,6 +1754,7 @@
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+  <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
   i32);
@@ -1690,6 +1767,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1722,6 +1800,7 @@
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+  <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
   i32);
@@ -1734,6 +1813,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -1766,6 +1846,7 @@
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+  <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
   i32);
@@ -1784,6 +1865,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
     i32 %2)
@@ -1822,6 +1904,7 @@
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+  <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
   i32);
@@ -1840,6 +1923,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
     i32 %2)
@@ -1878,6 +1962,7 @@
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+  <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
   i32);
@@ -1896,6 +1981,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
     i32 %2)
@@ -1934,6 +2020,7 @@
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+  <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
   i32);
@@ -1952,6 +2039,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
     i32 %2)
@@ -1997,6 +2085,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
@@ -2029,6 +2118,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+    <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 9,
     i32 %1)
@@ -2061,6 +2151,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+    <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 9,
     i32 %1)
@@ -2093,6 +2184,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+    <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 9,
     i32 %1)
@@ -2125,6 +2217,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+    <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 9,
     i32 %1)
@@ -2157,6 +2250,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+    <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 9,
     i32 %1)
@@ -2189,6 +2283,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+    <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 -9,
     i32 %1)
@@ -2221,6 +2316,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+    <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 9,
     i32 %1)
@@ -2253,6 +2349,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+    <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 9,
     i32 %1)
@@ -2285,6 +2382,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+    <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 9,
     i32 %1)
@@ -2317,6 +2415,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+    <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 9,
     i32 %1)
@@ -2349,6 +2448,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+    <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 9,
     i32 %1)
@@ -2381,6 +2481,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+    <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 9,
     i32 %1)
@@ -2413,6 +2514,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+    <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 9,
     i32 %1)
@@ -2445,6 +2547,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -2477,6 +2580,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 9,
     i32 %1)
@@ -2509,6 +2613,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -2541,6 +2646,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 9,
     i32 %1)
@@ -2573,6 +2679,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 9,
     i32 %1)
@@ -2605,6 +2712,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 9,
     i32 %1)
@@ -2637,6 +2745,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 9,
     i32 %1)
@@ -2669,6 +2778,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
     i32 %1)
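NOTE (illustration, not part of the patch): the vadd.vi call sites above interact with the TableGen change that rewrites int_riscv_vsub by a small immediate into vadd.vi with the negated immediate; that pattern now also matches through the new undef passthru operand. A sketch under those assumptions (hypothetical function name; the expected assembly follows the pattern's own comment):

declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  i32)

define <vscale x 1 x i8> @sketch_vsub_imm_to_vadd_vi(<vscale x 1 x i8> %op1, i32 %vl) nounwind {
entry:
  ; A simm5_plus1 immediate such as 9 should still fold to
  ;   vadd.vi v8, v8, -9
  ; rather than materializing 9 in a scalar register for vsub.vx.
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %op1,
    i8 9,
    i32 %vl)
  ret <vscale x 1 x i8> %a
}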
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
 ; RUN:   -target-abi=ilp32d < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+  <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
@@ -15,6 +16,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -48,6 +50,7 @@
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
+  <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
@@ -61,6 +64,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
+    <vscale x 2 x float> undef,
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -94,6 +98,7 @@
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
+  <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
@@ -107,6 +112,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
+    <vscale x 4 x float> undef,
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -140,6 +146,7 @@
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
+  <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
@@ -153,6 +160,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
+    <vscale x 8 x float> undef,
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -186,6 +194,7 @@
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
+  <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
@@ -199,6 +208,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
+    <vscale x 16 x float> undef,
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -232,6 +242,7 @@
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
+  <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32);
@@ -245,6 +256,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
+    <vscale x 1 x double> undef,
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2)
@@ -278,6 +290,7 @@
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
+  <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32);
@@ -291,6 +304,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
+    <vscale x 2 x double> undef,
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2)
@@ -324,6 +338,7 @@
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
+  <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32);
@@ -337,6 +352,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
+    <vscale x 4 x double> undef,
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2)
@@ -370,6 +386,7 @@
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
+  <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32);
@@ -383,6 +400,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
+    <vscale x 8 x double> undef,
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2)
@@ -416,6 +434,7 @@
 }
 
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
+  <vscale x 1 x float>,
   <vscale x 1 x half>,
   half,
   i32);
@@ -429,6 +448,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
+    <vscale x 1 x float> undef,
     <vscale x 1 x half> %0,
     half %1,
     i32 %2)
@@ -462,6 +482,7 @@
 }
 
 declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
+  <vscale x 2 x float>,
   <vscale x 2 x half>,
   half,
   i32);
@@ -475,6 +496,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
+    <vscale x 2 x float> undef,
     <vscale x 2 x half> %0,
     half %1,
     i32 %2)
@@ -508,6 +530,7 @@
 }
 
 declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
+  <vscale x 4 x float>,
   <vscale x 4 x half>,
   half,
   i32);
@@ -521,6 +544,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
+    <vscale x 4 x float> undef,
     <vscale x 4 x half> %0,
     half %1,
     i32 %2)
@@ -554,6 +578,7 @@
 }
 
 declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
+  <vscale x 8 x float>,
   <vscale x 8 x half>,
   half,
   i32);
@@ -567,6 +592,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
+    <vscale x 8 x float> undef,
     <vscale x 8 x half> %0,
     half %1,
     i32 %2)
@@ -600,6 +626,7 @@
 }
 
 declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
+  <vscale x 16 x float>,
   <vscale x 16 x half>,
   half,
   i32);
@@ -613,6 +640,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
+    <vscale x 16 x float> undef,
     <vscale x 16 x half> %0,
     half %1,
     i32 %2)
@@ -646,6 +674,7 @@
 }
 
 declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
+  <vscale x 1 x double>,
   <vscale x 1 x float>,
   float,
   i32);
@@ -659,6 +688,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
+    <vscale x 1 x double> undef,
     <vscale x 1 x float> %0,
     float %1,
     i32 %2)
@@ -692,6 +722,7 @@
 }
 
 declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
+  <vscale x 2 x double>,
   <vscale x 2 x float>,
   float,
   i32);
@@ -705,6 +736,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
+    <vscale x 2 x double> undef,
     <vscale x 2 x float> %0,
     float %1,
     i32 %2)
@@ -738,6 +770,7 @@
 }
 
 declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
+  <vscale x 4 x double>,
   <vscale x 4 x float>,
   float,
   i32);
@@ -751,6 +784,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
+    <vscale x 4 x double> undef,
     <vscale x 4 x float> %0,
     float %1,
     i32 %2)
@@ -784,6 +818,7 @@
 }
 
 declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
+  <vscale x 8 x double>,
   <vscale x 8 x float>,
   float,
   i32);
@@ -797,6 +832,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
+    <vscale x 8 x double> undef,
     <vscale x 8 x float> %0,
     float %1,
     i32 %2)
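NOTE (illustration, not part of the patch): for widening instructions the passthru operand is typed as the widened result, not as the narrower sources, as every declare in the vfwadd diff above shows. A hedged sketch of the tail-undisturbed form (hypothetical function name), mirroring the unmasked-tu vwadd test earlier in this patch:

declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i32)

define <vscale x 1 x float> @sketch_vfwadd_tu(<vscale x 1 x float> %merge, <vscale x 1 x half> %op1, <vscale x 1 x half> %op2, i32 %vl) nounwind {
entry:
  ; %merge is a real value, so its tail elements must be preserved; by
  ; analogy with the unmasked-tu tests this should select a "tu" vsetvli.
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %merge,
    <vscale x 1 x half> %op1,
    <vscale x 1 x half> %op2,
    i32 %vl)
  ret <vscale x 1 x float> %a
}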
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -15,11 +15,11 @@
     br i1 %tobool, label %if.else, label %if.then
 
   if.then:                                          ; preds = %entry
-    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     br label %if.end
 
   if.else:                                          ; preds = %entry
-    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
+    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     br label %if.end
 
   if.end:                                          ; preds = %if.else, %if.then
@@ -56,11 +56,11 @@
     br i1 %tobool, label %if.else, label %if.then
 
   if.then:                                          ; preds = %entry
-    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
     br label %if.end
 
   if.else:                                          ; preds = %entry
-    %b = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %1, i64 %2)
+    %b = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %1, i64 %2)
     br label %if.end
 
   if.end:                                          ; preds = %if.else, %if.then
@@ -79,11 +79,11 @@
     br i1 %tobool, label %if.else, label %if.then
 
   if.then:                                          ; preds = %entry
-    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
+    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
     br label %if.end
 
   if.else:                                          ; preds = %entry
-    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
+    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
     br label %if.end
 
   if.end:                                          ; preds = %if.else, %if.then
@@ -96,10 +96,10 @@
   }
 
   ; Function Attrs: nounwind readnone
-  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
+  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
 
   ; Function Attrs: nounwind readnone
-  declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
+  declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
 
   ; Function Attrs: nounwind readonly
   declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #3
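NOTE (illustration, not part of the patch): the vsetvli-insert-crossbb.mir update is purely mechanical; with an undef passthru the vsetvli placement decisions the test checks are unchanged. A reduced sketch of the diamond shape it exercises (hypothetical function name): both arms use the same SEW/LMUL and the same VL, so the vector configuration established before the branch should remain valid on either path, which is the cross-basic-block property this test guards.

declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64)
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64)

define <vscale x 1 x i64> @sketch_diamond(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl, i1 %cond) nounwind {
entry:
  br i1 %cond, label %if.then, label %if.else

if.then:
  ; Same vtype/VL as the other arm; undef passthru keeps both tail-agnostic.
  %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
  br label %if.end

if.else:
  %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
  br label %if.end

if.end:
  %r = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
  ret <vscale x 1 x i64> %r
}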