diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+experimental-v \
+; RUN:   -verify-machineinstrs -O2 < %s | FileCheck %s
+
+declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64)
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i1>,
+  i64)
+
+define <vscale x 1 x double> @test1(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %b,
+    i64 %0)
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test2(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
+; CHECK-LABEL: test2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %b,
+    i64 %avl)
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x i64> @test3(i64 %avl, <vscale x 1 x i64> %a, <vscale x 1 x i64>* %b, <vscale x 1 x i1> %c) nounwind {
+; CHECK-LABEL: test3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vle64.v v8, (a1), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
+  %1 = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+    <vscale x 1 x i64> %a,
+    <vscale x 1 x i64>* %b,
+    <vscale x 1 x i1> %c,
+    i64 %0)
+
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test4(i64 %avl, <vscale x 1 x i64> %a, <vscale x 1 x i64>* %b, <vscale x 1 x i1> %c) nounwind {
+; CHECK-LABEL: test4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vle64.v v8, (a1), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
+  %1 = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+    <vscale x 1 x i64> %a,
+    <vscale x 1 x i64>* %b,
+    <vscale x 1 x i1> %c,
+    i64 %avl)
+
+  ret <vscale x 1 x i64> %1
+}