diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll +++ /dev/null @@ -1,282 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare i32 @llvm.riscv.vcpop.i32.nxv1i1( - , - i32); - -define i32 @intrinsic_vcpop_m_i32_nxv1i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.i32.nxv1i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.mask.i32.nxv1i1( - , - , - i32); - -define i32 @intrinsic_vcpop_mask_m_i32_nxv1i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv1i1( - %0, - %1, - i32 %2) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.i32.nxv2i1( - , - i32); - -define i32 @intrinsic_vcpop_m_i32_nxv2i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.i32.nxv2i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.mask.i32.nxv2i1( - , - , - i32); - -define i32 @intrinsic_vcpop_mask_m_i32_nxv2i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv2i1( - %0, - %1, - i32 %2) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.i32.nxv4i1( - , - i32); - -define i32 @intrinsic_vcpop_m_i32_nxv4i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.i32.nxv4i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.mask.i32.nxv4i1( - , - , - i32); - -define i32 @intrinsic_vcpop_mask_m_i32_nxv4i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv4i1( - %0, - %1, - i32 %2) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.i32.nxv8i1( - , - i32); - -define i32 @intrinsic_vcpop_m_i32_nxv8i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.i32.nxv8i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.mask.i32.nxv8i1( - , - , - i32); - -define i32 @intrinsic_vcpop_mask_m_i32_nxv8i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vcpop_mask_m_i32_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv8i1( - %0, - %1, - i32 %2) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.i32.nxv16i1( - , - i32); - -define i32 @intrinsic_vcpop_m_i32_nxv16i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.i32.nxv16i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.mask.i32.nxv16i1( - , - , - i32); - -define i32 @intrinsic_vcpop_mask_m_i32_nxv16i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv16i1( - %0, - %1, - i32 %2) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.i32.nxv32i1( - , - i32); - -define i32 @intrinsic_vcpop_m_i32_nxv32i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.i32.nxv32i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.mask.i32.nxv32i1( - , - , - i32); - -define i32 @intrinsic_vcpop_mask_m_i32_nxv32i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv32i1( - %0, - %1, - i32 %2) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.i32.nxv64i1( - , - i32); - -define i32 @intrinsic_vcpop_m_i32_nxv64i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.i32.nxv64i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vcpop.mask.i32.nxv64i1( - , - , - i32); - -define i32 @intrinsic_vcpop_mask_m_i32_nxv64i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv64i1( - %0, - %1, - i32 %2) - - ret i32 %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll +++ /dev/null @@ -1,282 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare i64 @llvm.riscv.vcpop.i64.nxv1i1( - , - i64); - -define i64 @intrinsic_vcpop_m_i64_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.i64.nxv1i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.mask.i64.nxv1i1( - , - , - i64); - -define i64 @intrinsic_vcpop_mask_m_i64_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv1i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.i64.nxv2i1( - , - i64); - -define i64 @intrinsic_vcpop_m_i64_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.i64.nxv2i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.mask.i64.nxv2i1( - , - , - i64); - -define i64 @intrinsic_vcpop_mask_m_i64_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv2i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.i64.nxv4i1( - , - i64); - -define i64 @intrinsic_vcpop_m_i64_nxv4i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.i64.nxv4i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.mask.i64.nxv4i1( - , - , - i64); - -define i64 @intrinsic_vcpop_mask_m_i64_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv4i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.i64.nxv8i1( - , - i64); - -define i64 @intrinsic_vcpop_m_i64_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.i64.nxv8i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.mask.i64.nxv8i1( - , - , - i64); - -define i64 @intrinsic_vcpop_mask_m_i64_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vcpop.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv8i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vcpop.i64.nxv16i1( - , - i64); - -define i64 @intrinsic_vcpop_m_i64_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vcpop.m a0, v0 -; CHECK-NEXT: ret 
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vcpop.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv32i1(
-  <vscale x 32 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vcpop.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv32i1(
-    <vscale x 32 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vcpop.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vcpop.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vcpop.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
@@ -0,0 +1,284 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vcpop.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
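The two RUN lines in the new vcpop.ll drive both register widths from one source: the test is written against a placeholder token iXLen, and sed specializes it before llc ever parses the IR. A minimal sketch of the same pipeline run by hand outside of lit, assuming a built llc and FileCheck on PATH (the vcpop.ll path is illustrative; lit normally substitutes %s and locates the tools itself):

  $ sed 's/iXLen/i32/g' vcpop.ll | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck vcpop.ll
  $ sed 's/iXLen/i64/g' vcpop.ll | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck vcpop.ll

Both pipelines must satisfy the same CHECK lines, which is why the merged test can only keep assertions that are identical on rv32 and rv64.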
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
+++ /dev/null
@@ -1,282 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare i32 @llvm.riscv.vfirst.i32.nxv1i1(
-  <vscale x 1 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv1i1(
-    <vscale x 1 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vfirst.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv2i1(
-  <vscale x 2 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv2i1(
-    <vscale x 2 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vfirst.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv4i1(
-  <vscale x 4 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv4i1(
-    <vscale x 4 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vfirst.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv8i1(
-  <vscale x 8 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv8i1(
-    <vscale x 8 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vfirst.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv16i1(
-  <vscale x 16 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vfirst.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv32i1(
-  <vscale x 32 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i32
@llvm.riscv.vfirst.i32.nxv32i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vfirst.mask.i32.nxv32i1( - , - , - i32); - -define i32 @intrinsic_vfirst_mask_m_i32_nxv32i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv32i1( - %0, - %1, - i32 %2) - - ret i32 %a -} - -declare i32 @llvm.riscv.vfirst.i32.nxv64i1( - , - i32); - -define i32 @intrinsic_vfirst_m_i32_nxv64i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vfirst.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vfirst.i32.nxv64i1( - %0, - i32 %1) - - ret i32 %a -} - -declare i32 @llvm.riscv.vfirst.mask.i32.nxv64i1( - , - , - i32); - -define i32 @intrinsic_vfirst_mask_m_i32_nxv64i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv64i1( - %0, - %1, - i32 %2) - - ret i32 %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll +++ /dev/null @@ -1,282 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare i64 @llvm.riscv.vfirst.i64.nxv1i1( - , - i64); - -define i64 @intrinsic_vfirst_m_i64_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vfirst.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.i64.nxv1i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.mask.i64.nxv1i1( - , - , - i64); - -define i64 @intrinsic_vfirst_mask_m_i64_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv1i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.i64.nxv2i1( - , - i64); - -define i64 @intrinsic_vfirst_m_i64_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vfirst.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.i64.nxv2i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.mask.i64.nxv2i1( - , - , - i64); - -define i64 @intrinsic_vfirst_mask_m_i64_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv2i1( - %0, - %1, 
- i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.i64.nxv4i1( - , - i64); - -define i64 @intrinsic_vfirst_m_i64_nxv4i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vfirst.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.i64.nxv4i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.mask.i64.nxv4i1( - , - , - i64); - -define i64 @intrinsic_vfirst_mask_m_i64_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv4i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.i64.nxv8i1( - , - i64); - -define i64 @intrinsic_vfirst_m_i64_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vfirst.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.i64.nxv8i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.mask.i64.nxv8i1( - , - , - i64); - -define i64 @intrinsic_vfirst_mask_m_i64_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv8i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.i64.nxv16i1( - , - i64); - -define i64 @intrinsic_vfirst_m_i64_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vfirst.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.i64.nxv16i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.mask.i64.nxv16i1( - , - , - i64); - -define i64 @intrinsic_vfirst_mask_m_i64_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv16i1( - %0, - %1, - i64 %2) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.i64.nxv32i1( - , - i64); - -define i64 @intrinsic_vfirst_m_i64_nxv32i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vfirst.m a0, v0 -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.i64.nxv32i1( - %0, - i64 %1) - - ret i64 %a -} - -declare i64 @llvm.riscv.vfirst.mask.i64.nxv32i1( - , - , - i64); - -define i64 @intrinsic_vfirst_mask_m_i64_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vfirst.m a0, v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv32i1( - %0, - %1, - i64 %2) - - ret i64 %a 
-}
-
-declare i64 @llvm.riscv.vfirst.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vfirst.m a0, v9, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
@@ -0,0 +1,284 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vfirst.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vfirst.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vfirst.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vfirst.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vfirst.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vfirst.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vfirst.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vfirst.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vfirst.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vfirst.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vfirst.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vfirst.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vfirst.m a0, v0
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vfirst.m a0, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
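The same iXLen trick carries vfirst.ll. Because sed rewrites every occurrence of the token, including the one embedded in the intrinsic name, the specialized IR matches what the deleted per-XLEN files spelled out by hand. A quick illustration of the substitution on a single declaration (plain POSIX shell, no LLVM tools needed; the one-line form of the declaration is used here for brevity):

  $ echo 'declare iXLen @llvm.riscv.vfirst.iXLen.nxv1i1(<vscale x 1 x i1>, iXLen);' | sed 's/iXLen/i64/g'
  declare i64 @llvm.riscv.vfirst.i64.nxv1i1(<vscale x 1 x i1>, i64);

which, modulo line breaks, is exactly the rv64 form from the removed vfirst-rv64.ll.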
diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
+++ /dev/null
@@ -1,758 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
-    i64 %0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
-    i64 %0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
-    i64 %0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
-    i64 %0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
-    i64 %0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
-    i64 %0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
-    i64 %0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
-    i64 %0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT: vid.v v8, v0.t
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
-    i64 %0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16:
-;
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv4i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv8i16( - i64); - -define @intrinsic_vid_v_nxv8i16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv8i16( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv8i16( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv8i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv8i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv16i16( - i64); - -define @intrinsic_vid_v_nxv16i16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv16i16( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv16i16( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv16i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv16i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv32i16( - i64); - -define @intrinsic_vid_v_nxv32i16(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv32i16( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv32i16( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv32i16( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv32i16( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv1i32( - i64); - -define @intrinsic_vid_v_nxv1i32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv1i32( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv1i32( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv1i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv1i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv2i32( - i64); - -define @intrinsic_vid_v_nxv2i32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv2i32( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv2i32( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv2i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: 
intrinsic_vid_mask_v_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv2i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv4i32( - i64); - -define @intrinsic_vid_v_nxv4i32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv4i32( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv4i32( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv4i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv4i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv8i32( - i64); - -define @intrinsic_vid_v_nxv8i32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv8i32( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv8i32( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv8i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv8i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv16i32( - i64); - -define @intrinsic_vid_v_nxv16i32(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv16i32( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv16i32( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv16i32( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv16i32( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv1i64( - i64); - -define @intrinsic_vid_v_nxv1i64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv1i64( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv1i64( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv1i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv1i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv2i64( - i64); - -define @intrinsic_vid_v_nxv2i64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv2i64( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv2i64( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv2i64( %0, %1, i64 %2) nounwind { -; 
CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv2i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv4i64( - i64); - -define @intrinsic_vid_v_nxv4i64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv4i64( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv4i64( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv4i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv4i64( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vid.nxv8i64( - i64); - -define @intrinsic_vid_v_nxv8i64(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vid_v_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu -; CHECK-NEXT: vid.v v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.nxv8i64( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vid.mask.nxv8i64( - , - , - i64); - -define @intrinsic_vid_mask_v_nxv8i64( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vid.v v8, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vid.mask.nxv8i64( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid.ll rename from llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vid.ll --- a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vid.ll @@ -1,10 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vid.nxv1i8( - i32); + iXLen); -define @intrinsic_vid_v_nxv1i8(i32 %0) nounwind { +define @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -12,7 +14,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i8( - i32 %0) + iXLen %0) ret %a } @@ -20,9 +22,9 @@ declare @llvm.riscv.vid.mask.nxv1i8( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv1i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu @@ -32,15 +34,15 @@ %a = call @llvm.riscv.vid.mask.nxv1i8( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv2i8( - i32); + iXLen); -define @intrinsic_vid_v_nxv2i8(i32 %0) nounwind { +define @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -48,7 +50,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i8( - i32 %0) + iXLen %0) ret %a } @@ -56,9 +58,9 
@@ declare @llvm.riscv.vid.mask.nxv2i8( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv2i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu @@ -68,15 +70,15 @@ %a = call @llvm.riscv.vid.mask.nxv2i8( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv4i8( - i32); + iXLen); -define @intrinsic_vid_v_nxv4i8(i32 %0) nounwind { +define @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -84,7 +86,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i8( - i32 %0) + iXLen %0) ret %a } @@ -92,9 +94,9 @@ declare @llvm.riscv.vid.mask.nxv4i8( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv4i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu @@ -104,15 +106,15 @@ %a = call @llvm.riscv.vid.mask.nxv4i8( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv8i8( - i32); + iXLen); -define @intrinsic_vid_v_nxv8i8(i32 %0) nounwind { +define @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -120,7 +122,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i8( - i32 %0) + iXLen %0) ret %a } @@ -128,9 +130,9 @@ declare @llvm.riscv.vid.mask.nxv8i8( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv8i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu @@ -140,15 +142,15 @@ %a = call @llvm.riscv.vid.mask.nxv8i8( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv16i8( - i32); + iXLen); -define @intrinsic_vid_v_nxv16i8(i32 %0) nounwind { +define @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -156,7 +158,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv16i8( - i32 %0) + iXLen %0) ret %a } @@ -164,9 +166,9 @@ declare @llvm.riscv.vid.mask.nxv16i8( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv16i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu @@ -176,15 +178,15 @@ %a = call @llvm.riscv.vid.mask.nxv16i8( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv32i8( - i32); + iXLen); -define @intrinsic_vid_v_nxv32i8(i32 %0) nounwind { +define @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -192,7 +194,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv32i8( - i32 %0) + iXLen %0) ret %a } @@ -200,9 +202,9 @@ declare @llvm.riscv.vid.mask.nxv32i8( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv32i8( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu @@ 
-212,15 +214,15 @@ %a = call @llvm.riscv.vid.mask.nxv32i8( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv1i16( - i32); + iXLen); -define @intrinsic_vid_v_nxv1i16(i32 %0) nounwind { +define @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -228,7 +230,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i16( - i32 %0) + iXLen %0) ret %a } @@ -236,9 +238,9 @@ declare @llvm.riscv.vid.mask.nxv1i16( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv1i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -248,15 +250,15 @@ %a = call @llvm.riscv.vid.mask.nxv1i16( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv2i16( - i32); + iXLen); -define @intrinsic_vid_v_nxv2i16(i32 %0) nounwind { +define @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -264,7 +266,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i16( - i32 %0) + iXLen %0) ret %a } @@ -272,9 +274,9 @@ declare @llvm.riscv.vid.mask.nxv2i16( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv2i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -284,15 +286,15 @@ %a = call @llvm.riscv.vid.mask.nxv2i16( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv4i16( - i32); + iXLen); -define @intrinsic_vid_v_nxv4i16(i32 %0) nounwind { +define @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -300,7 +302,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i16( - i32 %0) + iXLen %0) ret %a } @@ -308,9 +310,9 @@ declare @llvm.riscv.vid.mask.nxv4i16( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv4i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -320,15 +322,15 @@ %a = call @llvm.riscv.vid.mask.nxv4i16( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv8i16( - i32); + iXLen); -define @intrinsic_vid_v_nxv8i16(i32 %0) nounwind { +define @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -336,7 +338,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i16( - i32 %0) + iXLen %0) ret %a } @@ -344,9 +346,9 @@ declare @llvm.riscv.vid.mask.nxv8i16( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv8i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -356,15 +358,15 @@ %a = call @llvm.riscv.vid.mask.nxv8i16( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv16i16( - i32); + iXLen); -define @intrinsic_vid_v_nxv16i16(i32 %0) nounwind { +define @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind { ; CHECK-LABEL: 
intrinsic_vid_v_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -372,7 +374,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv16i16( - i32 %0) + iXLen %0) ret %a } @@ -380,9 +382,9 @@ declare @llvm.riscv.vid.mask.nxv16i16( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv16i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -392,15 +394,15 @@ %a = call @llvm.riscv.vid.mask.nxv16i16( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv32i16( - i32); + iXLen); -define @intrinsic_vid_v_nxv32i16(i32 %0) nounwind { +define @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -408,7 +410,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv32i16( - i32 %0) + iXLen %0) ret %a } @@ -416,9 +418,9 @@ declare @llvm.riscv.vid.mask.nxv32i16( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv32i16( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -428,15 +430,15 @@ %a = call @llvm.riscv.vid.mask.nxv32i16( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv1i32( - i32); + iXLen); -define @intrinsic_vid_v_nxv1i32(i32 %0) nounwind { +define @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -444,7 +446,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i32( - i32 %0) + iXLen %0) ret %a } @@ -452,9 +454,9 @@ declare @llvm.riscv.vid.mask.nxv1i32( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv1i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -464,15 +466,15 @@ %a = call @llvm.riscv.vid.mask.nxv1i32( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv2i32( - i32); + iXLen); -define @intrinsic_vid_v_nxv2i32(i32 %0) nounwind { +define @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -480,7 +482,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i32( - i32 %0) + iXLen %0) ret %a } @@ -488,9 +490,9 @@ declare @llvm.riscv.vid.mask.nxv2i32( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv2i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -500,15 +502,15 @@ %a = call @llvm.riscv.vid.mask.nxv2i32( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv4i32( - i32); + iXLen); -define @intrinsic_vid_v_nxv4i32(i32 %0) nounwind { +define @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -516,7 +518,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i32( - i32 %0) + iXLen %0) ret %a } @@ -524,9 +526,9 @@ declare 
@llvm.riscv.vid.mask.nxv4i32( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv4i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -536,15 +538,15 @@ %a = call @llvm.riscv.vid.mask.nxv4i32( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv8i32( - i32); + iXLen); -define @intrinsic_vid_v_nxv8i32(i32 %0) nounwind { +define @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -552,7 +554,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i32( - i32 %0) + iXLen %0) ret %a } @@ -560,9 +562,9 @@ declare @llvm.riscv.vid.mask.nxv8i32( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv8i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -572,15 +574,15 @@ %a = call @llvm.riscv.vid.mask.nxv8i32( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv16i32( - i32); + iXLen); -define @intrinsic_vid_v_nxv16i32(i32 %0) nounwind { +define @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -588,7 +590,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv16i32( - i32 %0) + iXLen %0) ret %a } @@ -596,9 +598,9 @@ declare @llvm.riscv.vid.mask.nxv16i32( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv16i32( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -608,15 +610,15 @@ %a = call @llvm.riscv.vid.mask.nxv16i32( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv1i64( - i32); + iXLen); -define @intrinsic_vid_v_nxv1i64(i32 %0) nounwind { +define @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -624,7 +626,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv1i64( - i32 %0) + iXLen %0) ret %a } @@ -632,9 +634,9 @@ declare @llvm.riscv.vid.mask.nxv1i64( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv1i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -644,15 +646,15 @@ %a = call @llvm.riscv.vid.mask.nxv1i64( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv2i64( - i32); + iXLen); -define @intrinsic_vid_v_nxv2i64(i32 %0) nounwind { +define @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -660,7 +662,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv2i64( - i32 %0) + iXLen %0) ret %a } @@ -668,9 +670,9 @@ declare @llvm.riscv.vid.mask.nxv2i64( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv2i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, tu, mu @@ -680,15 +682,15 @@ %a = call @llvm.riscv.vid.mask.nxv2i64( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv4i64( - i32); + iXLen); -define @intrinsic_vid_v_nxv4i64(i32 %0) nounwind { +define @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -696,7 +698,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv4i64( - i32 %0) + iXLen %0) ret %a } @@ -704,9 +706,9 @@ declare @llvm.riscv.vid.mask.nxv4i64( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv4i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -716,15 +718,15 @@ %a = call @llvm.riscv.vid.mask.nxv4i64( %0, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.vid.nxv8i64( - i32); + iXLen); -define @intrinsic_vid_v_nxv8i64(i32 %0) nounwind { +define @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -732,7 +734,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vid.nxv8i64( - i32 %0) + iXLen %0) ret %a } @@ -740,9 +742,9 @@ declare @llvm.riscv.vid.mask.nxv8i64( , , - i32); + iXLen); -define @intrinsic_vid_mask_v_nxv8i64( %0, %1, i32 %2) nounwind { +define @intrinsic_vid_mask_v_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -752,7 +754,7 @@ %a = call @llvm.riscv.vid.mask.nxv8i64( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll +++ /dev/null @@ -1,882 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.viota.nxv1i8( - , - i64); - -define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv1i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv1i8( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv1i8( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv2i8( - , - i64); - -define @intrinsic_viota_m_nxv2i8_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv2i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv2i8( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, 
tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv2i8( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv4i8( - , - i64); - -define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv4i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv4i8( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv4i8( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv8i8( - , - i64); - -define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv8i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv8i8( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv8i8( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv16i8( - , - i64); - -define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv16i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv16i8( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv16i8( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv32i8( - , - i64); - -define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv32i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv32i8( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv32i8( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv64i8( - , - i64); - -define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu 
-; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv64i8( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv64i8( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv64i8( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv1i16( - , - i64); - -define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv1i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv1i16( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv1i16( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv2i16( - , - i64); - -define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv2i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv2i16( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv2i16( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv4i16( - , - i64); - -define @intrinsic_viota_m_nxv4i16_nxv4i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv4i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv4i16( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv4i16( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv8i16( - , - i64); - -define @intrinsic_viota_m_nxv8i16_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv8i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv8i16( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, m2, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv8i16( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv16i16( - , - i64); - -define @intrinsic_viota_m_nxv16i16_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv16i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv16i16( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv16i16( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv32i16( - , - i64); - -define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv32i16( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv32i16( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv32i16( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv1i32( - , - i64); - -define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv1i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv1i32( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv1i32( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv2i32( - , - i64); - -define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv2i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv2i32( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv2i32( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv4i32( - , - i64); - -define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv4i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv4i32( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv4i32( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv8i32( - , - i64); - -define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv8i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv8i32( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv8i32( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv16i32( - , - i64); - -define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv16i32( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv16i32( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv16i32( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv1i64( - , - i64); - -define @intrinsic_viota_m_nxv1i64_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv1i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv1i64( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv1i64_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: viota.m v8, v0, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.mask.nxv1i64( - %0, - %1, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.viota.nxv2i64( - , - i64); - -define @intrinsic_viota_m_nxv2i64_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu -; CHECK-NEXT: viota.m v8, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.viota.nxv2i64( - %0, - i64 %1) - - ret %a -} - -declare @llvm.riscv.viota.mask.nxv2i64( - , - , - , - i64); - -define @intrinsic_viota_mask_m_nxv2i64_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1: -; 
CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i1> %1,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i1> %1,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i1> %1,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota.ll
rename from llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/viota.ll
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -14,7 +16,7 @@
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
     <vscale x 1 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -23,9 +25,9 @@
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -36,16 +38,16 @@
     <vscale x 1 x i8> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ;
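; The rewritten RUN lines above are the mechanism behind this whole series
; of rv32/rv64 test merges (vid.ll earlier in the patch, and vlm/vmand/
; vmandn below): the test body is written once against the placeholder type
; iXLen, and each RUN line pipes the file through sed so llc only ever sees
; a concrete i32 or i64. A minimal sketch of what the riscv64 RUN line
; feeds to llc for the first declaration (illustrative shell invocation,
; not part of the patch):
;
;   $ sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/viota.ll
;   ...
;   declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
;     <vscale x 1 x i1>,
;     i64);
;   ...
;
; Both substitutions share the single CHECK prefix, so the merge is only
; possible because riscv32 and riscv64 currently produce identical assembly
; for these tests.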
CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -54,7 +56,7 @@ entry: %a = call @llvm.riscv.viota.nxv2i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -63,9 +65,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu @@ -76,16 +78,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv4i8( , - i32); + iXLen); -define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -94,7 +96,7 @@ entry: %a = call @llvm.riscv.viota.nxv4i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -103,9 +105,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu @@ -116,16 +118,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv8i8( , - i32); + iXLen); -define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -134,7 +136,7 @@ entry: %a = call @llvm.riscv.viota.nxv8i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -143,9 +145,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu @@ -156,16 +158,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv16i8( , - i32); + iXLen); -define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -174,7 +176,7 @@ entry: %a = call @llvm.riscv.viota.nxv16i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -183,9 +185,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu @@ -196,16 +198,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv32i8( , - i32); + iXLen); -define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -214,7 +216,7 @@ entry: %a = call @llvm.riscv.viota.nxv32i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -223,9 +225,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_viota_mask_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu @@ -236,16 +238,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv64i8( , - i32); + iXLen); -define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -254,7 +256,7 @@ entry: %a = call @llvm.riscv.viota.nxv64i8( %0, - i32 %1) + iXLen %1) ret %a } @@ -263,9 +265,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu @@ -276,16 +278,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv1i16( , - i32); + iXLen); -define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu @@ -294,7 +296,7 @@ entry: %a = call @llvm.riscv.viota.nxv1i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -303,9 +305,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu @@ -316,16 +318,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv2i16( , - i32); + iXLen); -define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu @@ -334,7 +336,7 @@ entry: %a = call @llvm.riscv.viota.nxv2i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -343,9 +345,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu @@ -356,16 +358,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv4i16( , - i32); + iXLen); -define @intrinsic_viota_m_nxv4i16_nxv4i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv4i16_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu @@ -374,7 +376,7 @@ entry: %a = call @llvm.riscv.viota.nxv4i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -383,9 +385,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu @@ -396,16 +398,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv8i16( , - i32); + iXLen); -define @intrinsic_viota_m_nxv8i16_nxv8i1( %0, i32 %1) nounwind { +define 
@intrinsic_viota_m_nxv8i16_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu @@ -414,7 +416,7 @@ entry: %a = call @llvm.riscv.viota.nxv8i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -423,9 +425,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu @@ -436,16 +438,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv16i16( , - i32); + iXLen); -define @intrinsic_viota_m_nxv16i16_nxv16i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv16i16_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu @@ -454,7 +456,7 @@ entry: %a = call @llvm.riscv.viota.nxv16i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -463,9 +465,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu @@ -476,16 +478,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv32i16( , - i32); + iXLen); -define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu @@ -494,7 +496,7 @@ entry: %a = call @llvm.riscv.viota.nxv32i16( %0, - i32 %1) + iXLen %1) ret %a } @@ -503,9 +505,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu @@ -516,16 +518,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv1i32( , - i32); + iXLen); -define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu @@ -534,7 +536,7 @@ entry: %a = call @llvm.riscv.viota.nxv1i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -543,9 +545,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu @@ -556,16 +558,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv2i32( , - i32); + iXLen); -define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu @@ -574,7 +576,7 @@ entry: %a = call @llvm.riscv.viota.nxv2i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -583,9 +585,9 @@ , , , - i32); 
+ iXLen); -define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu @@ -596,16 +598,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv4i32( , - i32); + iXLen); -define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu @@ -614,7 +616,7 @@ entry: %a = call @llvm.riscv.viota.nxv4i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -623,9 +625,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu @@ -636,16 +638,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv8i32( , - i32); + iXLen); -define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu @@ -654,7 +656,7 @@ entry: %a = call @llvm.riscv.viota.nxv8i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -663,9 +665,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu @@ -676,16 +678,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv16i32( , - i32); + iXLen); -define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -694,7 +696,7 @@ entry: %a = call @llvm.riscv.viota.nxv16i32( %0, - i32 %1) + iXLen %1) ret %a } @@ -703,9 +705,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu @@ -716,16 +718,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv1i64( , - i32); + iXLen); -define @intrinsic_viota_m_nxv1i64_nxv1i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv1i64_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu @@ -734,7 +736,7 @@ entry: %a = call @llvm.riscv.viota.nxv1i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -743,9 +745,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv1i64_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv1i64_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu @@ -756,16 +758,16 @@ %0, %1, %1, - i32 
%2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv2i64( , - i32); + iXLen); -define @intrinsic_viota_m_nxv2i64_nxv2i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv2i64_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu @@ -774,7 +776,7 @@ entry: %a = call @llvm.riscv.viota.nxv2i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -783,9 +785,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv2i64_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv2i64_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu @@ -796,16 +798,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv4i64( , - i32); + iXLen); -define @intrinsic_viota_m_nxv4i64_nxv4i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv4i64_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu @@ -814,7 +816,7 @@ entry: %a = call @llvm.riscv.viota.nxv4i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -823,9 +825,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv4i64_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv4i64_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu @@ -836,16 +838,16 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } declare @llvm.riscv.viota.nxv8i64( , - i32); + iXLen); -define @intrinsic_viota_m_nxv8i64_nxv8i1( %0, i32 %1) nounwind { +define @intrinsic_viota_m_nxv8i64_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu @@ -854,7 +856,7 @@ entry: %a = call @llvm.riscv.viota.nxv8i64( %0, - i32 %1) + iXLen %1) ret %a } @@ -863,9 +865,9 @@ , , , - i32); + iXLen); -define @intrinsic_viota_mask_m_nxv8i64_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_viota_mask_m_nxv8i64_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu @@ -876,7 +878,7 @@ %0, %1, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll +++ /dev/null @@ -1,94 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s - -declare @llvm.riscv.vlm.nxv1i1(*, i64); - -define @intrinsic_vlm_v_nxv1i1(* %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vlm_v_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vlm.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vlm.nxv1i1(* %0, i64 %1) - ret %a -} - -declare @llvm.riscv.vlm.nxv2i1(*, i64); - -define @intrinsic_vlm_v_nxv2i1(* %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vlm_v_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vlm.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vlm.nxv2i1(* %0, i64 %1) - ret %a -} - -declare @llvm.riscv.vlm.nxv4i1(*, i64); - -define @intrinsic_vlm_v_nxv4i1(* %0, 
i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, i64 %1)
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, i64);
-
-define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, i64 %1)
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, i64);
-
-define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, i64 %1)
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, i64);
-
-define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, i64 %1)
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, i64);
-
-define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, i64 %1)
-  ret <vscale x 64 x i1> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlm.ll
rename from llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vlm.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlm.ll
@@ -1,94 +1,96 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 
-declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, i32);
+declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, i32 %1)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, iXLen %1)
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, i32);
+declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, i32 %1)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, iXLen %1)
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, i32);
+declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2,
ta, mu ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vlm.nxv4i1(* %0, i32 %1) + %a = call @llvm.riscv.vlm.nxv4i1(* %0, iXLen %1) ret %a } -declare @llvm.riscv.vlm.nxv8i1(*, i32); +declare @llvm.riscv.vlm.nxv8i1(*, iXLen); -define @intrinsic_vlm_v_nxv8i1(* %0, i32 %1) nounwind { +define @intrinsic_vlm_v_nxv8i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vlm.nxv8i1(* %0, i32 %1) + %a = call @llvm.riscv.vlm.nxv8i1(* %0, iXLen %1) ret %a } -declare @llvm.riscv.vlm.nxv16i1(*, i32); +declare @llvm.riscv.vlm.nxv16i1(*, iXLen); -define @intrinsic_vlm_v_nxv16i1(* %0, i32 %1) nounwind { +define @intrinsic_vlm_v_nxv16i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vlm.nxv16i1(* %0, i32 %1) + %a = call @llvm.riscv.vlm.nxv16i1(* %0, iXLen %1) ret %a } -declare @llvm.riscv.vlm.nxv32i1(*, i32); +declare @llvm.riscv.vlm.nxv32i1(*, iXLen); -define @intrinsic_vlm_v_nxv32i1(* %0, i32 %1) nounwind { +define @intrinsic_vlm_v_nxv32i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vlm.nxv32i1(* %0, i32 %1) + %a = call @llvm.riscv.vlm.nxv32i1(* %0, iXLen %1) ret %a } -declare @llvm.riscv.vlm.nxv64i1(*, i32); +declare @llvm.riscv.vlm.nxv64i1(*, iXLen); -define @intrinsic_vlm_v_nxv64i1(* %0, i32 %1) nounwind { +define @intrinsic_vlm_v_nxv64i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vlm.nxv64i1(* %0, i32 %1) + %a = call @llvm.riscv.vlm.nxv64i1(* %0, iXLen %1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmand.nxv1i1( - , - , - i64); - -define @intrinsic_vmand_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmand.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmand.nxv2i1( - , - , - i64); - -define @intrinsic_vmand_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmand.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmand.nxv4i1( - , - , - i64); - -define @intrinsic_vmand_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call 
<vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmand.ll
rename from llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmand.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@
   %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@
 declare <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@
   %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@
 declare <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@
   %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@
 declare <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+
iXLen); -define @intrinsic_vmand_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmand.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmand.nxv16i1( , , - i32); + iXLen); -define @intrinsic_vmand_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call @llvm.riscv.vmand.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmand.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmand_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmand.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmand.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmand_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call @llvm.riscv.vmand.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmandn.nxv1i1( - , - , - i64); - -define @intrinsic_vmandn_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandn.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandn.nxv2i1( - , - , - i64); - -define @intrinsic_vmandn_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandn.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandn.nxv4i1( - , - , - i64); - -define @intrinsic_vmandn_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandn.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandn.nxv8i1( - , - , - i64); - -define @intrinsic_vmandn_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandn.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandn.nxv16i1( - , - , - i64); - 
-define @intrinsic_vmandn_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandn.nxv16i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandn.nxv32i1( - , - , - i64); - -define @intrinsic_vmandn_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandn.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandn.nxv64i1( - , - , - i64); - -define @intrinsic_vmandn_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmandn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandn.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll rename from llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmandn.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmandn.nxv1i1( , , - i32); + iXLen); -define @intrinsic_vmandn_mm_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmandn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -16,7 +18,7 @@ %a = call @llvm.riscv.vmandn.nxv1i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ declare @llvm.riscv.vmandn.nxv2i1( , , - i32); + iXLen); -define @intrinsic_vmandn_mm_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmandn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -36,7 +38,7 @@ %a = call @llvm.riscv.vmandn.nxv2i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ declare @llvm.riscv.vmandn.nxv4i1( , , - i32); + iXLen); -define @intrinsic_vmandn_mm_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmandn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -56,7 +58,7 @@ %a = call @llvm.riscv.vmandn.nxv4i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ declare @llvm.riscv.vmandn.nxv8i1( , , - i32); + iXLen); -define @intrinsic_vmandn_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmandn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmandn.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmandn.nxv16i1( , , - i32); + iXLen); -define 
@intrinsic_vmandn_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmandn_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call @llvm.riscv.vmandn.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmandn.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmandn_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmandn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmandn.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmandn.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmandn_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmandn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call @llvm.riscv.vmandn.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll +++ /dev/null @@ -1,114 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmclr.nxv1i1( - i64); - -define @intrinsic_vmclr_m_pseudo_nxv1i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmclr.nxv1i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmclr.nxv2i1( - i64); - -define @intrinsic_vmclr_m_pseudo_nxv2i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmclr.nxv2i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmclr.nxv4i1( - i64); - -define @intrinsic_vmclr_m_pseudo_nxv4i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmclr.nxv4i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmclr.nxv8i1( - i64); - -define @intrinsic_vmclr_m_pseudo_nxv8i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmclr.nxv8i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmclr.nxv16i1( - i64); - -define @intrinsic_vmclr_m_pseudo_nxv16i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmclr.nxv16i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmclr.nxv32i1( - i64); - -define @intrinsic_vmclr_m_pseudo_nxv32i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; 
CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmclr.nxv32i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmclr.nxv64i1( - i64); - -define @intrinsic_vmclr_m_pseudo_nxv64i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmclr.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmclr.nxv64i1( - i64 %0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll rename from llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmclr.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll @@ -1,10 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmclr.nxv1i1( - i32); + iXLen); -define @intrinsic_vmclr_m_pseudo_nxv1i1(i32 %0) nounwind { +define @intrinsic_vmclr_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -12,15 +14,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmclr.nxv1i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmclr.nxv2i1( - i32); + iXLen); -define @intrinsic_vmclr_m_pseudo_nxv2i1(i32 %0) nounwind { +define @intrinsic_vmclr_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -28,15 +30,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmclr.nxv2i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmclr.nxv4i1( - i32); + iXLen); -define @intrinsic_vmclr_m_pseudo_nxv4i1(i32 %0) nounwind { +define @intrinsic_vmclr_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -44,15 +46,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmclr.nxv4i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmclr.nxv8i1( - i32); + iXLen); -define @intrinsic_vmclr_m_pseudo_nxv8i1(i32 %0) nounwind { +define @intrinsic_vmclr_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -60,15 +62,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmclr.nxv8i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmclr.nxv16i1( - i32); + iXLen); -define @intrinsic_vmclr_m_pseudo_nxv16i1(i32 %0) nounwind { +define @intrinsic_vmclr_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -76,15 +78,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmclr.nxv16i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmclr.nxv32i1( - i32); + iXLen); -define @intrinsic_vmclr_m_pseudo_nxv32i1(i32 %0) nounwind { +define @intrinsic_vmclr_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -92,15 +94,15 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vmclr.nxv32i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmclr.nxv64i1( - i32); + iXLen); -define @intrinsic_vmclr_m_pseudo_nxv64i1(i32 %0) nounwind { +define @intrinsic_vmclr_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -108,7 +110,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmclr.nxv64i1( - i32 %0) + iXLen %0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmnand.nxv1i1( - , - , - i64); - -define @intrinsic_vmnand_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmnand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnand.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnand.nxv2i1( - , - , - i64); - -define @intrinsic_vmnand_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmnand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnand.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnand.nxv4i1( - , - , - i64); - -define @intrinsic_vmnand_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmnand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnand.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnand.nxv8i1( - , - , - i64); - -define @intrinsic_vmnand_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmnand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnand.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnand.nxv16i1( - , - , - i64); - -define @intrinsic_vmnand_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmnand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnand.nxv16i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnand.nxv32i1( - , - , - i64); - -define @intrinsic_vmnand_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmnand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnand.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnand.nxv64i1( - , - , - i64); - -define @intrinsic_vmnand_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmnand.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnand.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll rename from llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmnand.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmnand.nxv1i1( , , - i32); + iXLen); -define @intrinsic_vmnand_mm_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -16,7 +18,7 @@ %a = call @llvm.riscv.vmnand.nxv1i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ declare @llvm.riscv.vmnand.nxv2i1( , , - i32); + iXLen); -define @intrinsic_vmnand_mm_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -36,7 +38,7 @@ %a = call @llvm.riscv.vmnand.nxv2i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ declare @llvm.riscv.vmnand.nxv4i1( , , - i32); + iXLen); -define @intrinsic_vmnand_mm_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -56,7 +58,7 @@ %a = call @llvm.riscv.vmnand.nxv4i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ declare @llvm.riscv.vmnand.nxv8i1( , , - i32); + iXLen); -define @intrinsic_vmnand_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmnand.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmnand.nxv16i1( , , - i32); + iXLen); -define @intrinsic_vmnand_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call @llvm.riscv.vmnand.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmnand.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmnand_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmnand.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmnand.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmnand_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call 
@llvm.riscv.vmnand.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmnor.nxv1i1( - , - , - i64); - -define @intrinsic_vmnor_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnor.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnor.nxv2i1( - , - , - i64); - -define @intrinsic_vmnor_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnor.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnor.nxv4i1( - , - , - i64); - -define @intrinsic_vmnor_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnor.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnor.nxv8i1( - , - , - i64); - -define @intrinsic_vmnor_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnor.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnor.nxv16i1( - , - , - i64); - -define @intrinsic_vmnor_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnor.nxv16i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnor.nxv32i1( - , - , - i64); - -define @intrinsic_vmnor_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnor.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmnor.nxv64i1( - , - , - i64); - -define @intrinsic_vmnor_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmnor.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll rename from llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmnor.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: 
sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmnor.nxv1i1( , , - i32); + iXLen); -define @intrinsic_vmnor_mm_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -16,7 +18,7 @@ %a = call @llvm.riscv.vmnor.nxv1i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ declare @llvm.riscv.vmnor.nxv2i1( , , - i32); + iXLen); -define @intrinsic_vmnor_mm_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -36,7 +38,7 @@ %a = call @llvm.riscv.vmnor.nxv2i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ declare @llvm.riscv.vmnor.nxv4i1( , , - i32); + iXLen); -define @intrinsic_vmnor_mm_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -56,7 +58,7 @@ %a = call @llvm.riscv.vmnor.nxv4i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ declare @llvm.riscv.vmnor.nxv8i1( , , - i32); + iXLen); -define @intrinsic_vmnor_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmnor.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmnor.nxv16i1( , , - i32); + iXLen); -define @intrinsic_vmnor_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call @llvm.riscv.vmnor.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmnor.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmnor_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmnor.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmnor.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmnor_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call @llvm.riscv.vmnor.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmor.nxv1i1( - , - , - i64); - -define 
@intrinsic_vmor_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmor.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmor.nxv2i1( - , - , - i64); - -define @intrinsic_vmor_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmor.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmor.nxv4i1( - , - , - i64); - -define @intrinsic_vmor_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmor.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmor.nxv8i1( - , - , - i64); - -define @intrinsic_vmor_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmor.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmor.nxv16i1( - , - , - i64); - -define @intrinsic_vmor_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmor.nxv16i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmor.nxv32i1( - , - , - i64); - -define @intrinsic_vmor_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmor.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmor.nxv64i1( - , - , - i64); - -define @intrinsic_vmor_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmor.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmor.ll rename from llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmor.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmor.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmor.nxv1i1( , , - i32); + iXLen); -define @intrinsic_vmor_mm_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -16,7 +18,7 @@ %a = call 
@llvm.riscv.vmor.nxv1i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ declare @llvm.riscv.vmor.nxv2i1( , , - i32); + iXLen); -define @intrinsic_vmor_mm_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -36,7 +38,7 @@ %a = call @llvm.riscv.vmor.nxv2i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ declare @llvm.riscv.vmor.nxv4i1( , , - i32); + iXLen); -define @intrinsic_vmor_mm_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -56,7 +58,7 @@ %a = call @llvm.riscv.vmor.nxv4i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ declare @llvm.riscv.vmor.nxv8i1( , , - i32); + iXLen); -define @intrinsic_vmor_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmor.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmor.nxv16i1( , , - i32); + iXLen); -define @intrinsic_vmor_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call @llvm.riscv.vmor.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmor.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmor_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmor.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmor.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmor_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call @llvm.riscv.vmor.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmorn.nxv1i1( - , - , - i64); - -define @intrinsic_vmorn_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmorn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmorn.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmorn.nxv2i1( - , - , - i64); - -define @intrinsic_vmorn_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmorn.mm v0, v0, v8 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmorn.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmorn.nxv4i1( - , - , - i64); - -define @intrinsic_vmorn_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmorn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmorn.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmorn.nxv8i1( - , - , - i64); - -define @intrinsic_vmorn_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmorn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmorn.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmorn.nxv16i1( - , - , - i64); - -define @intrinsic_vmorn_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmorn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmorn.nxv16i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmorn.nxv32i1( - , - , - i64); - -define @intrinsic_vmorn_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmorn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmorn.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmorn.nxv64i1( - , - , - i64); - -define @intrinsic_vmorn_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmorn.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmorn.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll rename from llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmorn.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmorn.nxv1i1( , , - i32); + iXLen); -define @intrinsic_vmorn_mm_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmorn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -16,7 +18,7 @@ %a = call @llvm.riscv.vmorn.nxv1i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ declare @llvm.riscv.vmorn.nxv2i1( , , - i32); + iXLen); -define @intrinsic_vmorn_mm_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmorn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -36,7 +38,7 @@ %a = call @llvm.riscv.vmorn.nxv2i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ declare @llvm.riscv.vmorn.nxv4i1( , , - i32); + iXLen); 
-define @intrinsic_vmorn_mm_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmorn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -56,7 +58,7 @@ %a = call @llvm.riscv.vmorn.nxv4i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ declare @llvm.riscv.vmorn.nxv8i1( , , - i32); + iXLen); -define @intrinsic_vmorn_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmorn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmorn.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmorn.nxv16i1( , , - i32); + iXLen); -define @intrinsic_vmorn_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmorn_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call @llvm.riscv.vmorn.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmorn.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmorn_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmorn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmorn.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmorn.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmorn_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmorn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call @llvm.riscv.vmorn.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll +++ /dev/null @@ -1,114 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmset.nxv1i1( - i64); - -define @intrinsic_vmset_m_pseudo_nxv1i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmset.nxv1i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmset.nxv2i1( - i64); - -define @intrinsic_vmset_m_pseudo_nxv2i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmset.nxv2i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmset.nxv4i1( - i64); - -define @intrinsic_vmset_m_pseudo_nxv4i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmset.nxv4i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmset.nxv8i1( - i64); - -define @intrinsic_vmset_m_pseudo_nxv8i1(i64 %0) nounwind { -; CHECK-LABEL: 
intrinsic_vmset_m_pseudo_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmset.nxv8i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmset.nxv16i1( - i64); - -define @intrinsic_vmset_m_pseudo_nxv16i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmset.nxv16i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmset.nxv32i1( - i64); - -define @intrinsic_vmset_m_pseudo_nxv32i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmset.nxv32i1( - i64 %0) - - ret %a -} - -declare @llvm.riscv.vmset.nxv64i1( - i64); - -define @intrinsic_vmset_m_pseudo_nxv64i1(i64 %0) nounwind { -; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmset.m v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmset.nxv64i1( - i64 %0) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmset.ll rename from llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmset.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmset.ll @@ -1,10 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmset.nxv1i1( - i32); + iXLen); -define @intrinsic_vmset_m_pseudo_nxv1i1(i32 %0) nounwind { +define @intrinsic_vmset_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -12,15 +14,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmset.nxv1i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmset.nxv2i1( - i32); + iXLen); -define @intrinsic_vmset_m_pseudo_nxv2i1(i32 %0) nounwind { +define @intrinsic_vmset_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -28,15 +30,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmset.nxv2i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmset.nxv4i1( - i32); + iXLen); -define @intrinsic_vmset_m_pseudo_nxv4i1(i32 %0) nounwind { +define @intrinsic_vmset_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -44,15 +46,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmset.nxv4i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmset.nxv8i1( - i32); + iXLen); -define @intrinsic_vmset_m_pseudo_nxv8i1(i32 %0) nounwind { +define @intrinsic_vmset_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -60,15 +62,15 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmset.nxv8i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmset.nxv16i1( - i32); + iXLen); -define @intrinsic_vmset_m_pseudo_nxv16i1(i32 %0) nounwind { +define @intrinsic_vmset_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -76,15 +78,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmset.nxv16i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmset.nxv32i1( - i32); + iXLen); -define @intrinsic_vmset_m_pseudo_nxv32i1(i32 %0) nounwind { +define @intrinsic_vmset_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -92,15 +94,15 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmset.nxv32i1( - i32 %0) + iXLen %0) ret %a } declare @llvm.riscv.vmset.nxv64i1( - i32); + iXLen); -define @intrinsic_vmset_m_pseudo_nxv64i1(i32 %0) nounwind { +define @intrinsic_vmset_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -108,7 +110,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmset.nxv64i1( - i32 %0) + iXLen %0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll +++ /dev/null @@ -1,296 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsif.nxv1i1( - , - i64); - -define @intrinsic_vmsif_m_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.nxv1i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsif.mask.nxv1i1( - , - , - , - i64); - -define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsif.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.mask.nxv1i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsif.nxv2i1( - , - i64); - -define @intrinsic_vmsif_m_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.nxv2i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsif.mask.nxv2i1( - , - , - , - i64); - -define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsif.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.mask.nxv2i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsif.nxv4i1( - , - i64); - -define @intrinsic_vmsif_m_nxv4i1( %0, i64 %1) 
nounwind { -; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.nxv4i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsif.mask.nxv4i1( - , - , - , - i64); - -define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsif.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.mask.nxv4i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsif.nxv8i1( - , - i64); - -define @intrinsic_vmsif_m_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.nxv8i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsif.mask.nxv8i1( - , - , - , - i64); - -define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsif.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.mask.nxv8i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsif.nxv16i1( - , - i64); - -define @intrinsic_vmsif_m_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.nxv16i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsif.mask.nxv16i1( - , - , - , - i64); - -define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsif.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.mask.nxv16i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsif.nxv32i1( - , - i64); - -define @intrinsic_vmsif_m_nxv32i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.nxv32i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsif.mask.nxv32i1( - , - , - , - i64); - -define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsif.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.mask.nxv32i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsif.nxv64i1( - , - i64); - -define 
@intrinsic_vmsif_m_nxv64i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.nxv64i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsif.mask.nxv64i1( - , - , - , - i64); - -define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsif.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsif.mask.nxv64i1( - %0, - %1, - %2, - i64 %3) - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll rename from llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsif.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmsif.nxv1i1( , - i32); + iXLen); -define @intrinsic_vmsif_m_nxv1i1( %0, i32 %1) nounwind { +define @intrinsic_vmsif_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ entry: %a = call @llvm.riscv.vmsif.nxv1i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,9 +25,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -39,15 +41,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsif.nxv2i1( , - i32); + iXLen); -define @intrinsic_vmsif_m_nxv2i1( %0, i32 %1) nounwind { +define @intrinsic_vmsif_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ entry: %a = call @llvm.riscv.vmsif.nxv2i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -65,9 +67,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -81,15 +83,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsif.nxv4i1( , - i32); + iXLen); -define @intrinsic_vmsif_m_nxv4i1( %0, i32 %1) nounwind { +define @intrinsic_vmsif_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ entry: %a = call @llvm.riscv.vmsif.nxv4i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -107,9 +109,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i32 %3) nounwind 
{ +define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -123,15 +125,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsif.nxv8i1( , - i32); + iXLen); -define @intrinsic_vmsif_m_nxv8i1( %0, i32 %1) nounwind { +define @intrinsic_vmsif_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ entry: %a = call @llvm.riscv.vmsif.nxv8i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -149,9 +151,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -165,15 +167,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsif.nxv16i1( , - i32); + iXLen); -define @intrinsic_vmsif_m_nxv16i1( %0, i32 %1) nounwind { +define @intrinsic_vmsif_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ entry: %a = call @llvm.riscv.vmsif.nxv16i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -191,9 +193,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -207,15 +209,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsif.nxv32i1( , - i32); + iXLen); -define @intrinsic_vmsif_m_nxv32i1( %0, i32 %1) nounwind { +define @intrinsic_vmsif_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ entry: %a = call @llvm.riscv.vmsif.nxv32i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -233,9 +235,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -249,15 +251,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsif.nxv64i1( , - i32); + iXLen); -define @intrinsic_vmsif_m_nxv64i1( %0, i32 %1) nounwind { +define @intrinsic_vmsif_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -267,7 +269,7 @@ entry: %a = call @llvm.riscv.vmsif.nxv64i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -275,9 +277,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -291,6 +293,6 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll +++ /dev/null @@ -1,296 +0,0 @@ -; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmsof.nxv1i1( - , - i64); - -define @intrinsic_vmsof_m_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.nxv1i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsof.mask.nxv1i1( - , - , - , - i64); - -define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsof.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.mask.nxv1i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsof.nxv2i1( - , - i64); - -define @intrinsic_vmsof_m_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.nxv2i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsof.mask.nxv2i1( - , - , - , - i64); - -define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsof.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.mask.nxv2i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsof.nxv4i1( - , - i64); - -define @intrinsic_vmsof_m_nxv4i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.nxv4i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsof.mask.nxv4i1( - , - , - , - i64); - -define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsof.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.mask.nxv4i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsof.nxv8i1( - , - i64); - -define @intrinsic_vmsof_m_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv.v.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.nxv8i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsof.mask.nxv8i1( - , - , - , - i64); - -define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; 
CHECK-NEXT: vmsof.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.mask.nxv8i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsof.nxv16i1( - , - i64); - -define @intrinsic_vmsof_m_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.nxv16i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsof.mask.nxv16i1( - , - , - , - i64); - -define @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsof.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.mask.nxv16i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsof.nxv32i1( - , - i64); - -define @intrinsic_vmsof_m_nxv32i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.nxv32i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsof.mask.nxv32i1( - , - , - , - i64); - -define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsof.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.mask.nxv32i1( - %0, - %1, - %2, - i64 %3) - ret %a -} - -declare @llvm.riscv.vmsof.nxv64i1( - , - i64); - -define @intrinsic_vmsof_m_nxv64i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.nxv64i1( - %0, - i64 %1) - ret %a -} - -declare @llvm.riscv.vmsof.mask.nxv64i1( - , - , - , - i64); - -define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu -; CHECK-NEXT: vmv1r.v v0, v9 -; CHECK-NEXT: vmsof.m v10, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmsof.mask.nxv64i1( - %0, - %1, - %2, - i64 %3) - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll rename from llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmsof.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: 
-verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmsof.nxv1i1( , - i32); + iXLen); -define @intrinsic_vmsof_m_nxv1i1( %0, i32 %1) nounwind { +define @intrinsic_vmsof_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -15,7 +17,7 @@ entry: %a = call @llvm.riscv.vmsof.nxv1i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -23,9 +25,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -39,15 +41,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsof.nxv2i1( , - i32); + iXLen); -define @intrinsic_vmsof_m_nxv2i1( %0, i32 %1) nounwind { +define @intrinsic_vmsof_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -57,7 +59,7 @@ entry: %a = call @llvm.riscv.vmsof.nxv2i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -65,9 +67,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -81,15 +83,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsof.nxv4i1( , - i32); + iXLen); -define @intrinsic_vmsof_m_nxv4i1( %0, i32 %1) nounwind { +define @intrinsic_vmsof_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -99,7 +101,7 @@ entry: %a = call @llvm.riscv.vmsof.nxv4i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -107,9 +109,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -123,15 +125,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsof.nxv8i1( , - i32); + iXLen); -define @intrinsic_vmsof_m_nxv8i1( %0, i32 %1) nounwind { +define @intrinsic_vmsof_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -141,7 +143,7 @@ entry: %a = call @llvm.riscv.vmsof.nxv8i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -149,9 +151,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -165,15 +167,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsof.nxv16i1( , - i32); + iXLen); -define @intrinsic_vmsof_m_nxv16i1( %0, i32 %1) nounwind { +define @intrinsic_vmsof_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -183,7 +185,7 @@ entry: %a = call @llvm.riscv.vmsof.nxv16i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -191,9 +193,9 @@ , , , - i32); + iXLen); -define 
@intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -207,15 +209,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsof.nxv32i1( , - i32); + iXLen); -define @intrinsic_vmsof_m_nxv32i1( %0, i32 %1) nounwind { +define @intrinsic_vmsof_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -225,7 +227,7 @@ entry: %a = call @llvm.riscv.vmsof.nxv32i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -233,9 +235,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -249,15 +251,15 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } declare @llvm.riscv.vmsof.nxv64i1( , - i32); + iXLen); -define @intrinsic_vmsof_m_nxv64i1( %0, i32 %1) nounwind { +define @intrinsic_vmsof_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -267,7 +269,7 @@ entry: %a = call @llvm.riscv.vmsof.nxv64i1( %0, - i32 %1) + iXLen %1) ret %a } @@ -275,9 +277,9 @@ , , , - i32); + iXLen); -define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i32 %3) nounwind { +define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v10, v0 @@ -291,6 +293,6 @@ %0, %1, %2, - i32 %3) + iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmxnor.nxv1i1( - , - , - i64); - -define @intrinsic_vmxnor_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmxnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxnor.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxnor.nxv2i1( - , - , - i64); - -define @intrinsic_vmxnor_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmxnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxnor.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxnor.nxv4i1( - , - , - i64); - -define @intrinsic_vmxnor_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmxnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxnor.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxnor.nxv8i1( - , - , - i64); - -define @intrinsic_vmxnor_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmxnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxnor.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxnor.nxv16i1( - , - , - i64); - -define @intrinsic_vmxnor_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmxnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxnor.nxv16i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxnor.nxv32i1( - , - , - i64); - -define @intrinsic_vmxnor_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmxnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxnor.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxnor.nxv64i1( - , - , - i64); - -define @intrinsic_vmxnor_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmxnor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxnor.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll rename from llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmxnor.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmxnor.nxv1i1( , , - i32); + iXLen); -define @intrinsic_vmxnor_mm_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -16,7 +18,7 @@ %a = call @llvm.riscv.vmxnor.nxv1i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ declare @llvm.riscv.vmxnor.nxv2i1( , , - i32); + iXLen); -define @intrinsic_vmxnor_mm_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -36,7 +38,7 @@ %a = call @llvm.riscv.vmxnor.nxv2i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ declare @llvm.riscv.vmxnor.nxv4i1( , , - i32); + iXLen); -define @intrinsic_vmxnor_mm_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -56,7 +58,7 @@ %a = call @llvm.riscv.vmxnor.nxv4i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ declare @llvm.riscv.vmxnor.nxv8i1( , , - i32); + iXLen); -define @intrinsic_vmxnor_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1: ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmxnor.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmxnor.nxv16i1( , , - i32); + iXLen); -define @intrinsic_vmxnor_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxnor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call @llvm.riscv.vmxnor.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmxnor.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmxnor_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmxnor.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmxnor.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmxnor_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call @llvm.riscv.vmxnor.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmxor.nxv1i1( - , - , - i64); - -define @intrinsic_vmxor_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxor.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxor.nxv2i1( - , - , - i64); - -define @intrinsic_vmxor_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxor.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxor.nxv4i1( - , - , - i64); - -define @intrinsic_vmxor_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxor.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxor.nxv8i1( - , - , - i64); - -define @intrinsic_vmxor_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxor.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxor.nxv16i1( - , - , - i64); - -define @intrinsic_vmxor_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: 
vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxor.nxv16i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxor.nxv32i1( - , - , - i64); - -define @intrinsic_vmxor_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxor.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmxor.nxv64i1( - , - , - i64); - -define @intrinsic_vmxor_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmxor.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll rename from llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vmxor.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll @@ -1,12 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vmxor.nxv1i1( , , - i32); + iXLen); -define @intrinsic_vmxor_mm_nxv1i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu @@ -16,7 +18,7 @@ %a = call @llvm.riscv.vmxor.nxv1i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -24,9 +26,9 @@ declare @llvm.riscv.vmxor.nxv2i1( , , - i32); + iXLen); -define @intrinsic_vmxor_mm_nxv2i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -36,7 +38,7 @@ %a = call @llvm.riscv.vmxor.nxv2i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -44,9 +46,9 @@ declare @llvm.riscv.vmxor.nxv4i1( , , - i32); + iXLen); -define @intrinsic_vmxor_mm_nxv4i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -56,7 +58,7 @@ %a = call @llvm.riscv.vmxor.nxv4i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -64,9 +66,9 @@ declare @llvm.riscv.vmxor.nxv8i1( , , - i32); + iXLen); -define @intrinsic_vmxor_mm_nxv8i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu @@ -76,7 +78,7 @@ %a = call @llvm.riscv.vmxor.nxv8i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -84,9 +86,9 @@ declare @llvm.riscv.vmxor.nxv16i1( , , - i32); + iXLen); -define @intrinsic_vmxor_mm_nxv16i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu @@ -96,7 +98,7 @@ %a = call 
@llvm.riscv.vmxor.nxv16i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -104,9 +106,9 @@ declare @llvm.riscv.vmxor.nxv32i1( , , - i32); + iXLen); -define @intrinsic_vmxor_mm_nxv32i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu @@ -116,7 +118,7 @@ %a = call @llvm.riscv.vmxor.nxv32i1( %0, %1, - i32 %2) + iXLen %2) ret %a } @@ -124,9 +126,9 @@ declare @llvm.riscv.vmxor.nxv64i1( , , - i32); + iXLen); -define @intrinsic_vmxor_mm_nxv64i1( %0, %1, i32 %2) nounwind { +define @intrinsic_vmxor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu @@ -136,7 +138,7 @@ %a = call @llvm.riscv.vmxor.nxv64i1( %0, %1, - i32 %2) + iXLen %2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll +++ /dev/null @@ -1,137 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s - -declare void @llvm.riscv.vsm.nxv1i1(, *, i64); - -define void @intrinsic_vsm_v_nxv1i1( %0, * %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsm_v_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsm.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsm.nxv1i1( %0, * %1, i64 %2) - ret void -} - -declare void @llvm.riscv.vsm.nxv2i1(, *, i64); - -define void @intrinsic_vsm_v_nxv2i1( %0, * %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsm_v_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsm.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsm.nxv2i1( %0, * %1, i64 %2) - ret void -} - -declare void @llvm.riscv.vsm.nxv4i1(, *, i64); - -define void @intrinsic_vsm_v_nxv4i1( %0, * %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsm_v_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsm.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsm.nxv4i1( %0, * %1, i64 %2) - ret void -} - -declare void @llvm.riscv.vsm.nxv8i1(, *, i64); - -define void @intrinsic_vsm_v_nxv8i1( %0, * %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsm_v_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsm.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsm.nxv8i1( %0, * %1, i64 %2) - ret void -} - -declare void @llvm.riscv.vsm.nxv16i1(, *, i64); - -define void @intrinsic_vsm_v_nxv16i1( %0, * %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsm_v_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vsm.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsm.nxv16i1( %0, * %1, i64 %2) - ret void -} - -declare void @llvm.riscv.vsm.nxv32i1(, *, i64); - -define void @intrinsic_vsm_v_nxv32i1( %0, * %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vsm_v_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vsm.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsm.nxv32i1( %0, * %1, i64 %2) - ret void -} - -declare void @llvm.riscv.vsm.nxv64i1(, *, i64); - -define void @intrinsic_vsm_v_nxv64i1( %0, * %1, i64 %2) 
nounwind { -; CHECK-LABEL: intrinsic_vsm_v_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vsm.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsm.nxv64i1( %0, * %1, i64 %2) - ret void -} - -declare @llvm.riscv.vmseq.nxv1i16( - , - , - i64); - -; Make sure we can use the vsetvli from the producing instruction. -define void @test_vsetvli_i16( %0, %1, * %2, i64 %3) nounwind { -; CHECK-LABEL: test_vsetvli_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vsm.v v8, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i16( - %0, - %1, - i64 %3) - call void @llvm.riscv.vsm.nxv1i1( %a, * %2, i64 %3) - ret void -} - -declare @llvm.riscv.vmseq.nxv1i32( - , - , - i64); - -define void @test_vsetvli_i32( %0, %1, * %2, i64 %3) nounwind { -; CHECK-LABEL: test_vsetvli_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vmseq.vv v8, v8, v9 -; CHECK-NEXT: vsm.v v8, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmseq.nxv1i32( - %0, - %1, - i64 %3) - call void @llvm.riscv.vsm.nxv1i1( %a, * %2, i64 %3) - ret void -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsm.ll rename from llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll rename to llvm/test/CodeGen/RISCV/rvv/vsm.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm.ll @@ -1,105 +1,107 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s -declare void @llvm.riscv.vsm.nxv1i1(, *, i32); +declare void @llvm.riscv.vsm.nxv1i1(, *, iXLen); -define void @intrinsic_vsm_v_nxv1i1( %0, * %1, i32 %2) nounwind { +define void @intrinsic_vsm_v_nxv1i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: - call void @llvm.riscv.vsm.nxv1i1( %0, * %1, i32 %2) + call void @llvm.riscv.vsm.nxv1i1( %0, * %1, iXLen %2) ret void } -declare void @llvm.riscv.vsm.nxv2i1(, *, i32); +declare void @llvm.riscv.vsm.nxv2i1(, *, iXLen); -define void @intrinsic_vsm_v_nxv2i1( %0, * %1, i32 %2) nounwind { +define void @intrinsic_vsm_v_nxv2i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: - call void @llvm.riscv.vsm.nxv2i1( %0, * %1, i32 %2) + call void @llvm.riscv.vsm.nxv2i1( %0, * %1, iXLen %2) ret void } -declare void @llvm.riscv.vsm.nxv4i1(, *, i32); +declare void @llvm.riscv.vsm.nxv4i1(, *, iXLen); -define void @intrinsic_vsm_v_nxv4i1( %0, * %1, i32 %2) nounwind { +define void @intrinsic_vsm_v_nxv4i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: - call void @llvm.riscv.vsm.nxv4i1( %0, * %1, i32 %2) + call void @llvm.riscv.vsm.nxv4i1( %0, * %1, iXLen %2) ret void } -declare void @llvm.riscv.vsm.nxv8i1(, *, i32); +declare void 
@llvm.riscv.vsm.nxv8i1(, *, iXLen); -define void @intrinsic_vsm_v_nxv8i1( %0, * %1, i32 %2) nounwind { +define void @intrinsic_vsm_v_nxv8i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: - call void @llvm.riscv.vsm.nxv8i1( %0, * %1, i32 %2) + call void @llvm.riscv.vsm.nxv8i1( %0, * %1, iXLen %2) ret void } -declare void @llvm.riscv.vsm.nxv16i1(, *, i32); +declare void @llvm.riscv.vsm.nxv16i1(, *, iXLen); -define void @intrinsic_vsm_v_nxv16i1( %0, * %1, i32 %2) nounwind { +define void @intrinsic_vsm_v_nxv16i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: - call void @llvm.riscv.vsm.nxv16i1( %0, * %1, i32 %2) + call void @llvm.riscv.vsm.nxv16i1( %0, * %1, iXLen %2) ret void } -declare void @llvm.riscv.vsm.nxv32i1(, *, i32); +declare void @llvm.riscv.vsm.nxv32i1(, *, iXLen); -define void @intrinsic_vsm_v_nxv32i1( %0, * %1, i32 %2) nounwind { +define void @intrinsic_vsm_v_nxv32i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: - call void @llvm.riscv.vsm.nxv32i1( %0, * %1, i32 %2) + call void @llvm.riscv.vsm.nxv32i1( %0, * %1, iXLen %2) ret void } -declare void @llvm.riscv.vsm.nxv64i1(, *, i32); +declare void @llvm.riscv.vsm.nxv64i1(, *, iXLen); -define void @intrinsic_vsm_v_nxv64i1( %0, * %1, i32 %2) nounwind { +define void @intrinsic_vsm_v_nxv64i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: - call void @llvm.riscv.vsm.nxv64i1( %0, * %1, i32 %2) + call void @llvm.riscv.vsm.nxv64i1( %0, * %1, iXLen %2) ret void } declare @llvm.riscv.vmseq.nxv1i16( , , - i32); + iXLen); ; Make sure we can use the vsetvli from the producing instruction. -define void @test_vsetvli_i16( %0, %1, * %2, i32 %3) nounwind { +define void @test_vsetvli_i16( %0, %1, * %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu @@ -110,17 +112,17 @@ %a = call @llvm.riscv.vmseq.nxv1i16( %0, %1, - i32 %3) - call void @llvm.riscv.vsm.nxv1i1( %a, * %2, i32 %3) + iXLen %3) + call void @llvm.riscv.vsm.nxv1i1( %a, * %2, iXLen %3) ret void } declare @llvm.riscv.vmseq.nxv1i32( , , - i32); + iXLen); -define void @test_vsetvli_i32( %0, %1, * %2, i32 %3) nounwind { +define void @test_vsetvli_i32( %0, %1, * %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu @@ -131,7 +133,7 @@ %a = call @llvm.riscv.vmseq.nxv1i32( %0, %1, - i32 %3) - call void @llvm.riscv.vsm.nxv1i1( %a, * %2, i32 %3) + iXLen %3) + call void @llvm.riscv.vsm.nxv1i1( %a, * %2, iXLen %3) ret void }
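For reference, every test file renamed in this patch follows the same lit convention: the IR is written once against the placeholder type iXLen, and each RUN line pipes the file through sed to turn that placeholder into i32 (riscv32) or i64 (riscv64) before llc sees it. Below is a minimal, self-contained sketch of that pattern; the function return_vl is a hypothetical stand-in chosen only to keep the example free of intrinsic declarations, not a test taken from this patch.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; iXLen never reaches llc: sed rewrites it to the target's native XLEN type
; first, so a single file and a single set of CHECK lines cover both targets.
define iXLen @return_vl(iXLen %vl) nounwind {
; CHECK-LABEL: return_vl:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ret
entry:
  ret iXLen %vl
}

Because the generated code here is identical on both targets (the argument is already in a0), one shared CHECK prefix suffices; a test whose output differed by XLEN would instead use a separate FileCheck prefix per RUN line.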