diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1045,8 +1045,13 @@
       EVT OpVT = ScalarOp.getValueType();
       if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
           (OpVT == MVT::i32 && Subtarget.is64Bit())) {
-        ScalarOp =
-            DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+        // If the operand is a constant, sign extend to increase our chances
+        // of being able to use a .vi instruction. ANY_EXTEND would become a
+        // zero extend and the simm5 check in isel would fail.
+        // FIXME: Should we ignore the upper bits in isel instead?
+        unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+                                                        : ISD::ANY_EXTEND;
+        ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
         return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                            Operands);
       }
     }
@@ -1087,9 +1092,15 @@
       EVT OpVT = ScalarOp.getValueType();
       if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
           (OpVT == MVT::i32 && Subtarget.is64Bit())) {
-        ScalarOp =
-            DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
-        return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), Operands);
+        // If the operand is a constant, sign extend to increase our chances
+        // of being able to use a .vi instruction. ANY_EXTEND would become a
+        // zero extend and the simm5 check in isel would fail.
+        // FIXME: Should we ignore the upper bits in isel instead?
+        unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+                                                        : ISD::ANY_EXTEND;
+        ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
+        return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
+                           Operands);
       }
     }
   }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -724,10 +724,10 @@
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call @llvm.riscv.vadc.nxv1i8.i8(
     %0,
-    i8 9,
+    i8 -9,
     %1,
     i32 %2)
@@ -752,10 +752,10 @@
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call @llvm.riscv.vadc.nxv4i8.i8(
     %0,
-    i8 9,
+    i8 -9,
     %1,
     i32 %2)
@@ -780,10 +780,10 @@
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call @llvm.riscv.vadc.nxv16i8.i8(
     %0,
-    i8 9,
+    i8 -9,
     %1,
     i32 %2)
@@ -808,10 +808,10 @@
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call @llvm.riscv.vadc.nxv64i8.i8(
     %0,
-    i8 9,
+    i8 -9,
     %1,
     i32 %2)
@@ -836,10 +836,10 @@
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16
 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call @llvm.riscv.vadc.nxv2i16.i16(
     %0,
-    i16 9,
+    i16 -9,
     %1,
     i32 %2)
@@ -864,10 +864,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv8i16.i16( %0, - i16 9, + i16 -9, %1, i32 %2) @@ -892,10 +892,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv32i16.i16( %0, - i16 9, + i16 -9, %1, i32 %2) @@ -920,10 +920,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv2i32.i32( %0, - i32 9, + i32 -9, %1, i32 %2) @@ -948,10 +948,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv8i32.i32( %0, - i32 9, + i32 -9, %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll @@ -898,10 +898,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv2i8.i8( %0, - i8 9, + i8 -9, %1, i64 %2) @@ -926,10 +926,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv8i8.i8( %0, - i8 9, + i8 -9, %1, i64 %2) @@ -954,10 +954,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv32i8.i8( %0, - i8 9, + i8 -9, %1, i64 %2) @@ -982,10 +982,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv1i16.i16( %0, - i16 9, + i16 -9, %1, i64 %2) @@ -1010,10 +1010,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv4i16.i16( %0, - i16 9, + i16 -9, %1, i64 %2) @@ -1038,10 +1038,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv16i16.i16( %0, - i16 9, + i16 -9, %1, i64 %2) @@ -1066,10 +1066,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv1i32.i32( %0, - i32 9, + i32 -9, %1, i64 %2) @@ -1094,10 +1094,10 @@ entry: ; CHECK-LABEL: 
intrinsic_vadc_vim_nxv4i32_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv4i32.i32( %0, - i32 9, + i32 -9, %1, i64 %2) @@ -1122,10 +1122,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv16i32.i32( %0, - i32 9, + i32 -9, %1, i64 %2) @@ -1150,10 +1150,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv2i64.i64( %0, - i64 9, + i64 -9, %1, i64 %2) @@ -1178,10 +1178,10 @@ entry: ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vadc.nxv8i64.i64( %0, - i64 9, + i64 -9, %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -1457,11 +1457,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -1485,11 +1485,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -1513,11 +1513,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -1541,11 +1541,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -1569,11 +1569,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -1597,11 +1597,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -1612,10 +1612,10 @@ entry: ; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vadd.vi {{v[0-9]+}}, 
{{v[0-9]+}}, -9 %a = call @llvm.riscv.vadd.nxv64i8.i8( %0, - i8 9, + i8 -9, i32 %1) ret %a @@ -1625,11 +1625,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -1653,11 +1653,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1681,11 +1681,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1709,11 +1709,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1737,11 +1737,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1765,11 +1765,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1793,11 +1793,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1821,11 +1821,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1849,11 +1849,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1877,11 +1877,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1905,11 +1905,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vadd.vi 
{{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1933,11 +1933,11 @@ entry: ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll @@ -665,10 +665,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv2i1.i8( %0, - i8 9, + i8 -9, i32 %1) ret %a @@ -691,10 +691,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv8i1.i8( %0, - i8 9, + i8 -9, i32 %1) ret %a @@ -717,10 +717,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv32i1.i8( %0, - i8 9, + i8 -9, i32 %1) ret %a @@ -743,10 +743,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv1i1.i16( %0, - i16 9, + i16 -9, i32 %1) ret %a @@ -769,10 +769,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv4i1.i16( %0, - i16 9, + i16 -9, i32 %1) ret %a @@ -795,10 +795,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv16i1.i16( %0, - i16 9, + i16 -9, i32 %1) ret %a @@ -821,10 +821,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv1i1.i32( %0, - i32 9, + i32 -9, i32 %1) ret %a @@ -847,10 +847,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv4i1.i32( %0, - i32 9, + i32 -9, i32 %1) ret %a @@ -873,10 +873,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv16i1.i32( %0, - i32 9, + i32 -9, i32 %1) ret %a diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll @@ -809,10 +809,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv2i1.i8( %0, - i8 9, + i8 -9, i64 %1) ret %a @@ -835,10 +835,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv8i1.i8( %0, - i8 9, + i8 -9, i64 %1) ret %a @@ -861,10 +861,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv32i1.i8( %0, - i8 9, + i8 -9, i64 %1) ret %a @@ -887,10 +887,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv1i1.i16( %0, - i16 9, + i16 -9, i64 %1) ret %a @@ -913,10 +913,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv4i1.i16( %0, - i16 9, + i16 -9, i64 %1) ret %a @@ -939,10 +939,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv16i1.i16( %0, - i16 9, + i16 -9, i64 %1) ret %a @@ -965,10 +965,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv1i1.i32( %0, - i32 9, + i32 -9, i64 %1) ret %a @@ -991,10 +991,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv4i1.i32( %0, - i32 9, + i32 -9, i64 %1) ret %a @@ -1017,10 +1017,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv16i1.i32( %0, - i32 9, + i32 -9, i64 %1) ret %a @@ -1043,10 +1043,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv2i1.i64( %0, - i64 9, + i64 -9, i64 %1) ret %a @@ -1069,10 +1069,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vmadc.nxv8i1.i64( %0, - i64 9, + i64 -9, i64 %1) ret %a diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll @@ -738,10 +738,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8( %0, - i8 9, + i8 -9, %1, i32 %2) @@ -766,10 +766,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8( %0, - i8 9, + i8 -9, %1, i32 %2) @@ -794,10 +794,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8( %0, - i8 9, + i8 -9, %1, i32 %2) @@ -822,10 +822,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16( %0, - i16 9, + i16 -9, %1, i32 %2) @@ -850,10 +850,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16( %0, - i16 9, + i16 -9, %1, i32 %2) @@ -878,10 +878,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16( %0, - i16 9, + i16 -9, %1, i32 %2) @@ -906,10 +906,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32( %0, - i32 9, + i32 -9, %1, i32 %2) @@ -934,10 +934,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32( %0, - i32 9, + i32 -9, %1, i32 %2) @@ -962,10 +962,10 @@ entry: ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0 %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32( %0, - i32 9, + i32 -9, %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll @@ -737,11 +737,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi 
{{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -765,11 +765,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -793,11 +793,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -821,11 +821,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -849,11 +849,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -877,11 +877,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -905,11 +905,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( %0, %1, - i8 9, + i8 -9, %2, i32 %3) @@ -933,11 +933,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -961,11 +961,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -989,11 +989,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1017,11 +1017,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1045,11 +1045,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vrsub.vi 
{{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1073,11 +1073,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( %0, %1, - i16 9, + i16 -9, %2, i32 %3) @@ -1101,11 +1101,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1129,11 +1129,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1157,11 +1157,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1185,11 +1185,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3) @@ -1213,11 +1213,11 @@ entry: ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32 ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( %0, %1, - i32 9, + i32 -9, %2, i32 %3)
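
Note (illustrative addition, not part of the patch): the behavior described by the new comment in RISCVISelLowering.cpp can be sketched with a small standalone C++ program; isSImm5 below is a hypothetical stand-in for the simm5 immediate predicate checked during instruction selection.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the simm5 check: the .vi/.vim forms only
// accept immediates in [-16, 15].
static bool isSImm5(int64_t Imm) { return Imm >= -16 && Imm <= 15; }

int main() {
  int8_t Scalar = -9; // the immediate used by the updated tests
  // ANY_EXTEND of the constant would be treated as a zero extend, so the
  // XLen-wide operand holds 247 (0xF7) and the simm5 check fails.
  int64_t ZeroExtended = static_cast<uint8_t>(Scalar);
  // SIGN_EXTEND keeps the value -9, which fits simm5, so vadd.vi/vadc.vim
  // with immediate -9 can still be selected.
  int64_t SignExtended = Scalar;
  std::printf("zext %lld (simm5 %d), sext %lld (simm5 %d)\n",
              (long long)ZeroExtended, (int)isSImm5(ZeroExtended),
              (long long)SignExtended, (int)isSImm5(SignExtended));
  return 0;
}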