diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2563,14 +2563,15 @@
 };
 // This is an interim calling convention and it may be changed in the future.
 static const MCPhysReg ArgVRs[] = {
-    RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
-    RISCV::V21, RISCV::V22, RISCV::V23
-};
-static const MCPhysReg ArgVRM2s[] = {
-    RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
-};
-static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
-static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
+    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
+    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
+    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
+static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
+                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
+                                     RISCV::V20M2, RISCV::V22M2};
+static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
+                                     RISCV::V20M4};
+static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
 
 // Pass a 2*XLEN argument that has been split into two XLEN values through
 // registers or the stack as necessary.
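Illustration (editorial note, not part of the patch): the functional change above is that vector arguments and results are now allocated starting at v8 instead of v16, with larger LMULs taking aligned register groups (V8M2 covers v8-v9, V8M4 covers v8-v11, V8M8 covers v8-v15) and v0 left free for the mask. The regenerated tests below are mechanical fallout: update_llc_test_checks.py can now pin exact registers instead of {{v[0-9]+}} patterns. A minimal sketch of what the new convention implies, assuming a hypothetical @example wrapper whose register assignments mirror the first regenerated check block below:

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

define <vscale x 1 x i8> @example(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) nounwind {
  ; Under the new interim convention, %x arrives in v8, %y in v9, and
  ; %vl in a0; the result is returned in v8, so the body lowers to:
  ;   vsetvli a0, a0, e8,mf8,ta,mu
  ;   vadd.vv v8, v8, v9
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl)
  ret <vscale x 1 x i8> %a
}

One consequence visible in the m8 masked tests below: an LMUL=8 masked operation needs three v8-aligned operand groups plus v0 for the mask, but only v8 and v16 can hold m8 argument groups, so the third operand is passed in memory and reloaded (vle8.v v24, (a0)) before the vadd.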
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
@@ -6,10 +7,12 @@
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -26,10 +29,12 @@
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -46,10 +51,12 @@
   i32);
 
 define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -66,10 +73,12 @@
   i32);
 
 define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -86,10 +95,12 @@
   i32);
 
 define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -106,10 +117,12 @@
   i32);
 
 define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -126,10 +139,12 @@
   i32);
 
 define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -146,10 +161,12 @@
   i32);
 
 define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -166,10 +183,12 @@
   i32);
 
 define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -186,10 +205,12 @@
   i32);
 
 define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x i8>
@llvm.riscv.vadd.mask.nxv16i8.nxv16i8( %0, %1, @@ -206,10 +227,12 @@ i32); define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i8.nxv32i8( %0, %1, @@ -226,10 +249,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( %0, %1, @@ -246,10 +271,12 @@ i32); define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv64i8.nxv64i8( %0, %1, @@ -266,10 +293,14 @@ i32); define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( %0, %1, @@ -286,10 +317,12 @@ i32); define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i16.nxv1i16( %0, %1, @@ -306,10 +339,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( %0, %1, @@ -326,10 +361,12 @@ i32); define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i16.nxv2i16( %0, %1, @@ -346,10 +383,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( %0, %1, @@ -366,10 +405,12 @@ i32); define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i16.nxv4i16( %0, %1, @@ -386,10 +427,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( %0, %1, @@ -406,10 +449,12 @@ i32); define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i16.nxv8i16( %0, %1, @@ -426,10 +471,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( %0, %1, @@ -446,10 +493,12 @@ i32); define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i16.nxv16i16( %0, %1, @@ -466,10 +515,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( %0, %1, @@ -486,10 +537,12 @@ i32); define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i16.nxv32i16( %0, %1, @@ -506,10 +559,14 @@ i32); define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( %0, %1, @@ -526,10 +583,12 @@ i32); define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( %0, %1, @@ -546,10 +605,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( %0, %1, @@ -566,10 +627,12 @@ i32); define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call 
@llvm.riscv.vadd.nxv2i32.nxv2i32( %0, %1, @@ -586,10 +649,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( %0, %1, @@ -606,10 +671,12 @@ i32); define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i32.nxv4i32( %0, %1, @@ -626,10 +693,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( %0, %1, @@ -646,10 +715,12 @@ i32); define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i32.nxv8i32( %0, %1, @@ -666,10 +737,12 @@ i32); define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( %0, %1, @@ -686,10 +759,12 @@ i32); define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i32.nxv16i32( %0, %1, @@ -706,10 +781,14 @@ i32); define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( %0, %1, @@ -726,10 +805,12 @@ i32); define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i8.i8( %0, i8 %1, @@ -746,10 +827,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, %1, @@ -766,10 +849,12 @@ i32); define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i8.i8( %0, i8 %1, @@ -786,10 +871,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, %1, @@ -806,10 +893,12 @@ i32); define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i8.i8( %0, i8 %1, @@ -826,10 +915,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, %1, @@ -846,10 +937,12 @@ i32); define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i8.i8( %0, i8 %1, @@ -866,10 +959,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, %1, @@ -886,10 +981,12 @@ i32); define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i8.i8( %0, i8 %1, @@ -906,10 +1003,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, %1, @@ -926,10 +1025,12 @@ i32); define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i8.i8( %0, i8 %1, @@ -946,10 +1047,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, %1, @@ -966,10 +1069,12 @@ i32); define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; 
CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv64i8.i8( %0, i8 %1, @@ -986,10 +1091,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, %1, @@ -1006,10 +1113,12 @@ i32); define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i16.i16( %0, i16 %1, @@ -1026,10 +1135,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, %1, @@ -1046,10 +1157,12 @@ i32); define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i16.i16( %0, i16 %1, @@ -1066,10 +1179,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, %1, @@ -1086,10 +1201,12 @@ i32); define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i16.i16( %0, i16 %1, @@ -1106,10 +1223,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 
%4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, %1, @@ -1126,10 +1245,12 @@ i32); define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i16.i16( %0, i16 %1, @@ -1146,10 +1267,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, %1, @@ -1166,10 +1289,12 @@ i32); define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i16.i16( %0, i16 %1, @@ -1186,10 +1311,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, %1, @@ -1206,10 +1333,12 @@ i32); define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i16.i16( %0, i16 %1, @@ -1226,10 +1355,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16 
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, %1, @@ -1246,10 +1377,12 @@ i32); define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i32.i32( %0, i32 %1, @@ -1266,10 +1399,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, %1, @@ -1286,10 +1421,12 @@ i32); define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i32.i32( %0, i32 %1, @@ -1306,10 +1443,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, %1, @@ -1326,10 +1465,12 @@ i32); define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i32.i32( %0, i32 %1, @@ -1346,10 +1487,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, %1, @@ -1366,10 +1509,12 @@ i32); define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i32.i32( %0, i32 %1, @@ -1386,10 +1531,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, %1, @@ -1406,10 +1553,12 @@ i32); define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i32.i32( %0, i32 %1, @@ -1426,10 +1575,12 @@ i32); define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, %1, @@ -1441,10 +1592,12 @@ } define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv1i8.i8( %0, i8 9, @@ -1454,10 +1607,12 @@ } define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, %1, @@ -1469,10 +1624,12 @@ } define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv2i8.i8( %0, i8 9, @@ 
-1482,10 +1639,12 @@ } define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, %1, @@ -1497,10 +1656,12 @@ } define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv4i8.i8( %0, i8 9, @@ -1510,10 +1671,12 @@ } define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, %1, @@ -1525,10 +1688,12 @@ } define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv8i8.i8( %0, i8 9, @@ -1538,10 +1703,12 @@ } define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, %1, @@ -1553,10 +1720,12 @@ } define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv16i8.i8( %0, i8 9, @@ -1566,10 +1735,12 @@ } define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call 
@llvm.riscv.vadd.mask.nxv16i8.i8( %0, %1, @@ -1581,10 +1752,12 @@ } define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv32i8.i8( %0, i8 9, @@ -1594,10 +1767,12 @@ } define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, %1, @@ -1609,10 +1784,12 @@ } define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, -9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9 %a = call @llvm.riscv.vadd.nxv64i8.i8( %0, i8 -9, @@ -1622,10 +1799,12 @@ } define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, %1, @@ -1637,10 +1816,12 @@ } define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv1i16.i16( %0, i16 9, @@ -1650,10 +1831,12 @@ } define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, %1, @@ -1665,10 +1848,12 @@ } define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vadd.vi 
{{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv2i16.i16( %0, i16 9, @@ -1678,10 +1863,12 @@ } define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, %1, @@ -1693,10 +1880,12 @@ } define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv4i16.i16( %0, i16 9, @@ -1706,10 +1895,12 @@ } define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, %1, @@ -1721,10 +1912,12 @@ } define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv8i16.i16( %0, i16 9, @@ -1734,10 +1927,12 @@ } define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, %1, @@ -1749,10 +1944,12 @@ } define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv16i16.i16( %0, i16 9, @@ -1762,10 +1959,12 @@ } define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: 
intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, %1, @@ -1777,10 +1976,12 @@ } define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv32i16.i16( %0, i16 9, @@ -1790,10 +1991,12 @@ } define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,tu,mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, %1, @@ -1805,10 +2008,12 @@ } define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv1i32.i32( %0, i32 9, @@ -1818,10 +2023,12 @@ } define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, %1, @@ -1833,10 +2040,12 @@ } define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv2i32.i32( %0, i32 9, @@ -1846,10 +2055,12 @@ } define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, %1, @@ -1861,10 +2072,12 @@ } define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; 
CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv4i32.i32( %0, i32 9, @@ -1874,10 +2087,12 @@ } define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, %1, @@ -1889,10 +2104,12 @@ } define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv8i32.i32( %0, i32 9, @@ -1902,10 +2119,12 @@ } define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, %1, @@ -1917,10 +2136,12 @@ } define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv16i32.i32( %0, i32 9, @@ -1930,10 +2151,12 @@ } define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,tu,mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, %1, diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare @llvm.riscv.vadd.nxv1i8.nxv1i8( @@ -6,10 +7,12 @@ i64); define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, 
a0, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( %0, %1, @@ -26,10 +29,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( %0, %1, @@ -46,10 +51,12 @@ i64); define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i8.nxv2i8( %0, %1, @@ -66,10 +73,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( %0, %1, @@ -86,10 +95,12 @@ i64); define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i8.nxv4i8( %0, %1, @@ -106,10 +117,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( %0, %1, @@ -126,10 +139,12 @@ i64); define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i8.nxv8i8( %0, %1, @@ -146,10 +161,12 @@ i64); define 
@intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( %0, %1, @@ -166,10 +183,12 @@ i64); define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i8.nxv16i8( %0, %1, @@ -186,10 +205,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( %0, %1, @@ -206,10 +227,12 @@ i64); define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i8.nxv32i8( %0, %1, @@ -226,10 +249,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( %0, %1, @@ -246,10 +271,12 @@ i64); define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv64i8.nxv64i8( %0, %1, @@ -266,10 +293,14 @@ i64); define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vsetvli a0, 
a1, e8,m8,tu,mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( %0, %1, @@ -286,10 +317,12 @@ i64); define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i16.nxv1i16( %0, %1, @@ -306,10 +339,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( %0, %1, @@ -326,10 +361,12 @@ i64); define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i16.nxv2i16( %0, %1, @@ -346,10 +383,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( %0, %1, @@ -366,10 +405,12 @@ i64); define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i16.nxv4i16( %0, %1, @@ -386,10 +427,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( %0, %1, @@ -406,10 +449,12 @@ i64); define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i16.nxv8i16( %0, %1, @@ -426,10 +471,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( %0, %1, @@ -446,10 +493,12 @@ i64); define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i16.nxv16i16( %0, %1, @@ -466,10 +515,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( %0, %1, @@ -486,10 +537,12 @@ i64); define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i16.nxv32i16( %0, %1, @@ -506,10 +559,14 @@ i64); define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( %0, %1, @@ -526,10 +583,12 @@ i64); define 
@intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( %0, %1, @@ -546,10 +605,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( %0, %1, @@ -566,10 +627,12 @@ i64); define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i32.nxv2i32( %0, %1, @@ -586,10 +649,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( %0, %1, @@ -606,10 +671,12 @@ i64); define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i32.nxv4i32( %0, %1, @@ -626,10 +693,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( %0, %1, @@ -646,10 +715,12 @@ i64); define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: jalr 
zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i32.nxv8i32( %0, %1, @@ -666,10 +737,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( %0, %1, @@ -686,10 +759,12 @@ i64); define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i32.nxv16i32( %0, %1, @@ -706,10 +781,14 @@ i64); define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( %0, %1, @@ -726,10 +805,12 @@ i64); define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( %0, %1, @@ -746,10 +827,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( %0, %1, @@ -766,10 +849,12 @@ i64); define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vadd.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i64.nxv2i64( %0, %1, @@ -786,10 +871,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( %0, %1, @@ -806,10 +893,12 @@ i64); define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i64.nxv4i64( %0, %1, @@ -826,10 +915,12 @@ i64); define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( %0, %1, @@ -846,10 +937,12 @@ i64); define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i64.nxv8i64( %0, %1, @@ -866,10 +959,14 @@ i64); define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( %0, %1, @@ -886,10 +983,12 @@ i64); define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i8.i8( %0, i8 %1, @@ -906,10 +1005,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) 
nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, %1, @@ -926,10 +1027,12 @@ i64); define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i8.i8( %0, i8 %1, @@ -946,10 +1049,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, %1, @@ -966,10 +1071,12 @@ i64); define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i8.i8( %0, i8 %1, @@ -986,10 +1093,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, %1, @@ -1006,10 +1115,12 @@ i64); define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i8.i8( %0, i8 %1, @@ -1026,10 +1137,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = 
call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, %1, @@ -1046,10 +1159,12 @@ i64); define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i8.i8( %0, i8 %1, @@ -1066,10 +1181,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, %1, @@ -1086,10 +1203,12 @@ i64); define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i8.i8( %0, i8 %1, @@ -1106,10 +1225,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( %0, %1, @@ -1126,10 +1247,12 @@ i64); define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv64i8.i8( %0, i8 %1, @@ -1146,10 +1269,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( %0, %1, @@ -1166,10 +1291,12 @@ i64); define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: 
-; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i16.i16( %0, i16 %1, @@ -1186,10 +1313,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( %0, %1, @@ -1206,10 +1335,12 @@ i64); define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i16.i16( %0, i16 %1, @@ -1226,10 +1357,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( %0, %1, @@ -1246,10 +1379,12 @@ i64); define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i16.i16( %0, i16 %1, @@ -1266,10 +1401,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( %0, %1, @@ -1286,10 +1423,12 @@ i64); define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i16.i16( %0, i16 %1, @@ -1306,10 +1445,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 
%2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( %0, %1, @@ -1326,10 +1467,12 @@ i64); define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i16.i16( %0, i16 %1, @@ -1346,10 +1489,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( %0, %1, @@ -1366,10 +1511,12 @@ i64); define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv32i16.i16( %0, i16 %1, @@ -1386,10 +1533,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( %0, %1, @@ -1406,10 +1555,12 @@ i64); define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i32.i32( %0, i32 %1, @@ -1426,10 +1577,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: 
intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( %0, %1, @@ -1446,10 +1599,12 @@ i64); define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i32.i32( %0, i32 %1, @@ -1466,10 +1621,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( %0, %1, @@ -1486,10 +1643,12 @@ i64); define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i32.i32( %0, i32 %1, @@ -1506,10 +1665,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( %0, %1, @@ -1526,10 +1687,12 @@ i64); define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i32.i32( %0, i32 %1, @@ -1546,10 +1709,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( %0, %1, @@ -1566,10 +1731,12 @@ i64); define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) 
nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv16i32.i32( %0, i32 %1, @@ -1586,10 +1753,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( %0, %1, @@ -1606,10 +1775,12 @@ i64); define @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv1i64.i64( %0, i64 %1, @@ -1626,10 +1797,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i64.i64( %0, %1, @@ -1646,10 +1819,12 @@ i64); define @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv2i64.i64( %0, i64 %1, @@ -1666,10 +1841,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i64.i64( %0, %1, @@ -1686,10 +1863,12 @@ i64); define @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e64,m4,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv4i64.i64( %0, i64 %1, @@ -1706,10 +1885,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i64.i64( %0, %1, @@ -1726,10 +1907,12 @@ i64); define @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} %a = call @llvm.riscv.vadd.nxv8i64.i64( %0, i64 %1, @@ -1746,10 +1929,12 @@ i64); define @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu -; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i64.i64( %0, %1, @@ -1761,10 +1946,12 @@ } define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv1i8.i8( %0, i8 9, @@ -1774,10 +1961,12 @@ } define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( %0, %1, @@ -1789,10 +1978,12 @@ } define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv2i8.i8( %0, i8 9, @@ -1802,10 +1993,12 @@ } define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( %0, %1, @@ -1817,10 +2010,12 @@ } define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv4i8.i8( %0, i8 9, @@ -1830,10 +2025,12 @@ } define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( %0, %1, @@ -1845,10 +2042,12 @@ } define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv8i8.i8( %0, i8 9, @@ -1858,10 +2057,12 @@ } define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( %0, %1, @@ -1873,10 +2074,12 @@ } define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 %a = call @llvm.riscv.vadd.nxv16i8.i8( %0, i8 9, @@ -1886,10 +2089,12 @@ } define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8 -; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu -; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( %0, %1, @@ -1901,10 +2106,12 @@ } define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: 
jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
@@ -1914,10 +2121,12 @@
 }
 
 define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -1929,10 +2138,12 @@
 }
 
 define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 9,
@@ -1942,10 +2153,12 @@
 }
 
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,tu,mu
+; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
@@ -1957,10 +2170,12 @@
 }
 
 define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
@@ -1970,10 +2185,12 @@
 }
 
 define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -1985,10 +2202,12 @@
 }
 
 define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
@@ -1998,10 +2217,12 @@
 }
 
 define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -2013,10 +2234,12 @@
 }
 
 define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
@@ -2026,10 +2249,12 @@
 }
 
 define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -2041,10 +2266,12 @@
 }
 
 define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
@@ -2054,10 +2281,12 @@
 }
 
 define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -2069,10 +2298,12 @@
 }
 
 define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
@@ -2082,10 +2313,12 @@
 }
 
 define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -2097,10 +2330,12 @@
 }
 
 define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 9,
@@ -2110,10 +2345,12 @@
 }
 
 define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,tu,mu
+; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
@@ -2125,10 +2362,12 @@
 }
 
 define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
@@ -2138,10 +2377,12 @@
 }
 
 define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -2153,10 +2394,12 @@
 }
 
 define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
@@ -2166,10 +2409,12 @@
 }
 
 define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -2181,10 +2426,12 @@
 }
 
 define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
@@ -2194,10 +2441,12 @@
 }
 
 define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -2209,10 +2458,12 @@
 }
 
 define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
@@ -2222,10 +2473,12 @@
 }
 
 define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -2237,10 +2490,12 @@
 }
 
 define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 9,
@@ -2250,10 +2505,12 @@
 }
 
 define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,tu,mu
+; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
@@ -2265,10 +2522,12 @@
 }
 
 define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
@@ -2278,10 +2537,12 @@
 }
 
 define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
@@ -2293,10 +2554,12 @@
 }
 
 define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
@@ -2306,10 +2569,12 @@
 }
 
 define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
@@ -2321,10 +2586,12 @@
 }
 
 define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
@@ -2334,10 +2601,12 @@
 }
 
 define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
@@ -2349,10 +2618,12 @@
 }
 
 define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 9,
@@ -2362,10 +2633,12 @@
 }
 
 define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
+; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
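
Note on regenerating these assertions: the new CHECK lines above are in the exact-register format emitted by utils/update_llc_test_checks.py, which is why the calling-convention change surfaces as concrete v8-based operands instead of the old {{v[0-9]+}} regexes. A minimal sketch of the regeneration workflow, assuming a ninja build tree at build/ and that this hunk belongs to the rv64 variant of the test (inferred from the i64 VL operands; both paths are illustrative):

  $ ninja -C build llc
  $ llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll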