diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -67,12 +67,12 @@
 }
 
 def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
-  [{return isInt<5>(Imm - 1);}]> {
+  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
   let ParserMatchClass = SImm5Plus1AsmOperand;
   let MCOperandPredicate = [{
     int64_t Imm;
     if (MCOp.evaluateAsConstantImm(Imm))
-      return isInt<5>(Imm - 1);
+      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
     return MCOp.isBareSymbolRef();
   }];
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3278,6 +3278,29 @@
 defm PseudoVSUB : VPseudoBinaryV_VV_VX;
 defm PseudoVRSUB : VPseudoBinaryV_VX_VI;
 
+foreach vti = AllIntegerVectors in {
+  // Match VSUB with a small immediate to vadd.vi by negating the immediate.
+  def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
+                                        (vti.Scalar simm5_plus1:$rs2),
+                                        (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                              (NegImm simm5_plus1:$rs2),
+                                                              GPR:$vl,
+                                                              vti.SEW)>;
+  def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
+                                              (vti.Vector vti.RegClass:$rs1),
+                                              (vti.Scalar simm5_plus1:$rs2),
+                                              (vti.Mask V0),
+                                              (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
+                                                              vti.RegClass:$merge,
+                                                              vti.RegClass:$rs1,
+                                                              (NegImm simm5_plus1:$rs2),
+                                                              (vti.Mask V0),
+                                                              GPR:$vl,
+                                                              vti.SEW)>;
+}
+
 //===----------------------------------------------------------------------===//
 // 12.2. Vector Widening Integer Add/Subtract
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -1587,3 +1587,579 @@
 
   ret %a
 }
+
+define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 -9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 -9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 -9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 -9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 -9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 -9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 -9,
+    i32 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 -9,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 -9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 -9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 -9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 -9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 -9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 -9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 -9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 -9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 -9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 -9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 -9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -1940,3 +1940,707 @@
 
   ret %a
 }
+
+define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 9,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}