diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s
 
 define <vscale x 1 x i32> @vnsrl_wx_i64_nxv1i32(<vscale x 1 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vnsrl_wx_i64_nxv1i32:
@@ -632,3 +632,77 @@
   %y = trunc %x to
   ret %y
 }
+
+define <vscale x 1 x i16> @vnsrl_wx_i64_nxv1i16(<vscale x 1 x i32> %va, i64 %b) {
+; RV32-LABEL: vnsrl_wx_i64_nxv1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vnsrl.wi v9, v9, 0
+; RV32-NEXT:    vsrl.vv v8, v8, v9
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vnsrl_wx_i64_nxv1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v9, a0
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vnsrl.wi v9, v9, 0
+; RV64-NEXT:    vsrl.vv v8, v8, v9
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i32>
+  %x = lshr <vscale x 1 x i32> %va, %vb
+  %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %y
+}
+
+define <vscale x 1 x i8> @vnsrl_wx_i64_nxv1i8(<vscale x 1 x i16> %va, i64 %b) {
+; RV32-LABEL: vnsrl_wx_i64_nxv1i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vnsrl.wi v9, v9, 0
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vnsrl.wi v9, v9, 0
+; RV32-NEXT:    vsrl.vv v8, v8, v9
+; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vnsrl_wx_i64_nxv1i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v9, a0
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vnsrl.wi v9, v9, 0
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vnsrl.wi v9, v9, 0
+; RV64-NEXT:    vsrl.vv v8, v8, v9
+; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i16>
+  %x = lshr <vscale x 1 x i16> %va, %vb
+  %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %y
+}
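
Note: the CHECK/RV32/RV64 assertion lines above are autogenerated, as the NOTE header in the test says. Since this patch splits the single CHECK prefix into per-target RV32/RV64 prefixes, the assertions would be regenerated rather than hand-edited; a typical invocation (the build-directory path is illustrative, not from this patch) is:

    llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll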