diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll b/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s
+
+declare <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 4 x i32> @vadd(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
+; CHECK-LABEL: vadd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v10, v10, v12
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, iXLen %vl2)
+  ret <vscale x 4 x i32> %w
+}
+
+define <vscale x 4 x i32> @vadd_undef(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
+; CHECK-LABEL: vadd_undef:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.v.v v8, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, iXLen %vl2)
+  ret <vscale x 4 x i32> %w
+}
+
+; TODO: Is this correct if there's already a passthru in the src?
+define <vscale x 4 x i32> @vadd_same_passthru(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
+; CHECK-LABEL: vadd_same_passthru:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vadd.vv v14, v10, v12
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.v.v v8, v14
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, iXLen %vl2)
+  ret <vscale x 4 x i32> %w
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @vadd_mask_ma(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, iXLen %vl1, iXLen %vl2) {
+; CHECK-LABEL: vadd_mask_ma:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v10, v10, v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, iXLen %vl1, iXLen 2)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, iXLen %vl2)
+  ret <vscale x 4 x i32> %w
+}
+
+define <vscale x 4 x i32> @vadd_mask_mu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, iXLen %vl1, iXLen %vl2) {
+; CHECK-LABEL: vadd_mask_mu:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v10, v10, v12, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, iXLen %vl1, iXLen 0)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, iXLen %vl2)
+  ret <vscale x 4 x i32> %w
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(<vscale x 4 x i32>, ptr, iXLen)
+
+define <vscale x 4 x i32> @foldable_load(<vscale x 4 x i32> %passthru, ptr %p) {
+; CHECK-LABEL: foldable_load:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(<vscale x 4 x i32> poison, ptr %p, iXLen 4)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, iXLen 2)
+  ret <vscale x 4 x i32> %w
+}
+
+; Can't fold this as the VLs aren't constant.
+define <vscale x 4 x i32> @unfoldable_load(<vscale x 4 x i32> %passthru, ptr %p, iXLen %vl1, iXLen %vl2) {
+; CHECK-LABEL: unfoldable_load:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(<vscale x 4 x i32> poison, ptr %p, iXLen %vl1)
+  %w = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, iXLen %vl2)
+  ret <vscale x 4 x i32> %w
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 4 x float> @vfadd(<vscale x 4 x float> %passthru, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl1, iXLen %vl2) {
+; CHECK-LABEL: vfadd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v10, v10, v12
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl1)
+  %w = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(<vscale x 4 x float> %passthru, <vscale x 4 x float> %v, iXLen %vl2)
+  ret <vscale x 4 x float> %w
+}