diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -926,6 +926,14 @@
       if (Subtarget.hasStdExtD())
         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
     }
+
+    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv1i1, MVT::nxv1i8);
+    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i8);
+    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i8);
+    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i8);
+    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
+    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv32i1, MVT::nxv32i8);
+    setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv64i1, MVT::nxv64i8);
   }
 
   // Function alignments.
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -1,8 +1,190 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
 ; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zfh,+experimental-zvfh < %s | FileCheck %s
 
 ; Tests assume VLEN=128 or vscale_range_min=2.
 
+declare <vscale x 1 x i1> @llvm.experimental.vector.splice.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i1> @splice_nxv1i1_offset_one(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv1i1_offset_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v10, v10, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
+; CHECK-NEXT:    vslideup.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vand.vi v8, v10, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i1> @llvm.experimental.vector.splice.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i32 1)
+  ret <vscale x 1 x i1> %res
+}
+
+declare <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i1> @splice_nxv2i1_offset_one(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv2i1_offset_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vslidedown.vi v10, v10, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
+; CHECK-NEXT:    vslideup.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vand.vi v8, v10, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 1)
+  ret <vscale x 2 x i1> %res
+}
+
+declare <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i1> @splice_nxv4i1_offset_one(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv4i1_offset_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 1
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vslidedown.vi v10, v10, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
+; CHECK-NEXT:    vslideup.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vand.vi v8, v10, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 1)
+  ret <vscale x 4 x i1> %res
+}
+
+declare <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i1> @splice_nxv8i1_offset_one(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv8i1_offset_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v10, v10, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; CHECK-NEXT:    vslideup.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vand.vi v8, v10, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 1)
+  ret <vscale x 8 x i1> %res
+}
+
+declare <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i1> @splice_nxv16i1_offset_one(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv16i1_offset_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vslidedown.vi v12, v12, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
+; CHECK-NEXT:    vslideup.vx v12, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vand.vi v8, v12, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 1)
+  ret <vscale x 16 x i1> %res
+}
+
+declare <vscale x 32 x i1> @llvm.experimental.vector.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i1> @splice_nxv32i1_offset_one(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv32i1_offset_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vslidedown.vi v16, v16, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, mu
+; CHECK-NEXT:    vslideup.vx v16, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i1> @llvm.experimental.vector.splice.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, i32 1)
+  ret <vscale x 32 x i1> %res
+}
+
+declare <vscale x 64 x i1> @llvm.experimental.vector.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i1> @splice_nxv64i1_offset_one(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv64i1_offset_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmv.v.i v16, 0
+; CHECK-NEXT:    vmerge.vim v24, v16, 1, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vslidedown.vi v24, v24, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m8, tu, mu
+; CHECK-NEXT:    vslideup.vx v24, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m8, ta, mu
+; CHECK-NEXT:    vand.vi v8, v24, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i1> @llvm.experimental.vector.splice.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, i32 1)
+  ret <vscale x 64 x i1> %res
+}
+
 declare <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i32)
 
 define <vscale x 1 x i8> @splice_nxv1i8_offset_zero(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
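
The new CHECK lines show the effect of the promotion: vmerge.vim expands each i1 mask element to a 0/1 byte, the splice itself runs in the i8 domain as a vslidedown.vi/vslideup.vx pair, and vand.vi plus vmsne.vi narrow the result back to a mask. A minimal IR-level sketch of that equivalence, for illustration only (the @splice_mask_via_i8 name is hypothetical and this is not the SelectionDAG legalization itself):

declare <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i32)

; Splice two i1 masks by round-tripping through i8: zext widens each mask
; bit to a 0/1 byte (vmerge.vim above), the splice happens on bytes
; (vslidedown/vslideup), and trunc recovers the mask (vand.vi + vmsne.vi).
define <vscale x 1 x i1> @splice_mask_via_i8(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) {
  %a.i8 = zext <vscale x 1 x i1> %a to <vscale x 1 x i8>
  %b.i8 = zext <vscale x 1 x i1> %b to <vscale x 1 x i8>
  %s.i8 = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8> %a.i8, <vscale x 1 x i8> %b.i8, i32 1)
  %res = trunc <vscale x 1 x i8> %s.i8 to <vscale x 1 x i1>
  ret <vscale x 1 x i1> %res
}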