diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
@@ -860,3 +860,35 @@
   %vc = add <vscale x 8 x i64> %va, %splat
   ret <vscale x 8 x i64> %vc
 }
+
+define <vscale x 8 x i64> @vadd_xx_nxv8i64(i64 %a, i64 %b) {
+; RV32-LABEL: vadd_xx_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    sw a3, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vadd.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vadd_xx_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vadd.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
+  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %v = add <vscale x 8 x i64> %splat1, %splat2
+  ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
@@ -1352,3 +1352,34 @@
   ret <vscale x 8 x i64> %vc
 }
 
+define <vscale x 8 x i64> @vand_xx_nxv8i64(i64 %a, i64 %b) {
+; RV32-LABEL: vand_xx_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    sw a3, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vand_xx_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vand.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
+  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %v = and <vscale x 8 x i64> %splat1, %splat2
+  ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vmul_vv_nxv1i8:
@@ -919,3 +919,35 @@
   %vc = mul <vscale x 8 x i64> %va, %splat
   ret <vscale x 8 x i64> %vc
 }
+
+define <vscale x 8 x i64> @vmul_xx_nxv8i64(i64 %a, i64 %b) {
+; RV32-LABEL: vmul_xx_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    sw a3, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmul_xx_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vmul.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
+  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %v = mul <vscale x 8 x i64> %splat1, %splat2
+  ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
@@ -1144,3 +1144,35 @@
   %vc = or <vscale x 8 x i64> %va, %splat
   ret <vscale x 8 x i64> %vc
 }
+
+define <vscale x 8 x i64> @vor_xx_nxv8i64(i64 %a, i64 %b) {
+; RV32-LABEL: vor_xx_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    sw a3, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vor_xx_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vor.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
+  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %v = or <vscale x 8 x i64> %splat1, %splat2
+  ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
@@ -839,3 +839,34 @@
   ret <vscale x 8 x i64> %vc
 }
 
+define <vscale x 8 x i64> @vsub_xx_nxv8i64(i64 %a, i64 %b) {
+; RV32-LABEL: vsub_xx_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    sw a3, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsub.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vsub_xx_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vsub.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
+  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %v = sub <vscale x 8 x i64> %splat1, %splat2
+  ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
@@ -1352,3 +1352,34 @@
   ret <vscale x 8 x i64> %vc
 }
 
+define <vscale x 8 x i64> @vxor_xx_nxv8i64(i64 %a, i64 %b) {
+; RV32-LABEL: vxor_xx_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    sw a3, 12(sp)
+; RV32-NEXT:    sw a2, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vxor_xx_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vxor.vx v8, v8, a1
+; RV64-NEXT:    ret
+  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
+  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %v = xor <vscale x 8 x i64> %splat1, %splat2
+  ret <vscale x 8 x i64> %v
+}