diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7712,6 +7712,24 @@
   MVT XLenVT = Subtarget.getXLenVT();
   SDValue VL = DAG.getRegister(RISCV::X0, XLenVT);
 
+  // If the VT is LMUL=8, we need to split and reassemble.
+  if (VecVT.getSizeInBits().getKnownMinValue() == (8 * RISCV::RVVBitsPerBlock)) {
+    auto [Op0Lo, Op0Hi] = DAG.SplitVectorOperand(Op.getNode(), 0);
+    auto [Op1Lo, Op1Hi] = DAG.SplitVectorOperand(Op.getNode(), 1);
+    EVT SplitVT = Op0Lo.getValueType();
+
+    SDValue ResLo = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
+                                DAG.getVTList(SplitVT, SplitVT), Op0Lo, Op1Lo);
+    SDValue ResHi = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
+                                DAG.getVTList(SplitVT, SplitVT), Op0Hi, Op1Hi);
+
+    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT,
+                             ResLo.getValue(0), ResLo.getValue(1));
+    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT,
+                             ResHi.getValue(0), ResHi.getValue(1));
+    return DAG.getMergeValues({Lo, Hi}, DL);
+  }
+
   SDValue Interleaved;
 
   // If the element type is smaller than ELEN, then we can interleave with
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -93,6 +93,137 @@
 declare <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 4 x i64> @llvm.experimental.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
 
+define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
+; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.i v24, 0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v16, v24, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v24, v8, v16
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v24, a0, v16
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v24, v12, v20
+; CHECK-NEXT:    vwmaccu.vx v24, a0, v20
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmsne.vi v8, v24, 0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 128 x i1> @llvm.experimental.vector.interleave2.nxv128i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
+  ret <vscale x 128 x i1> %res
+}
+
+define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v8, v24, v16
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
+; CHECK-NEXT:    vwaddu.vv v0, v28, v20
+; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
+; CHECK-NEXT:    vmv8r.v v16, v0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 128 x i8> @llvm.experimental.vector.interleave2.nxv128i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
+  ret <vscale x 128 x i8> %res
+}
+
+define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v8, v24, v16
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
+; CHECK-NEXT:    vwaddu.vv v0, v28, v20
+; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
+; CHECK-NEXT:    vmv8r.v v16, v0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i16> @llvm.experimental.vector.interleave2.nxv64i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
+  ret <vscale x 64 x i16> %res
+}
+
+define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
+; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v8, v24, v16
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
+; CHECK-NEXT:    vwaddu.vv v0, v28, v20
+; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
+; CHECK-NEXT:    vmv8r.v v16, v0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i32> @llvm.experimental.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
+  ret <vscale x 32 x i32> %res
+}
+
+define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
+; CHECK-LABEL: vector_interleave_nxv16i64_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vid.v v24
+; CHECK-NEXT:    vand.vi v26, v24, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vsrl.vi v2, v24, 1
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vx v2, v2, a0, v0.t
+; CHECK-NEXT:    vmv4r.v v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v2, v0.t
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vrgatherei16.vv v24, v16, v2, v0.t
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv.v.v v16, v24
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
+  ret <vscale x 16 x i64> %res
+}
+
+declare <vscale x 128 x i1> @llvm.experimental.vector.interleave2.nxv128i1(<vscale x 64 x i1>, <vscale x 64 x i1>)
+declare <vscale x 128 x i8> @llvm.experimental.vector.interleave2.nxv128i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
+declare <vscale x 64 x i16> @llvm.experimental.vector.interleave2.nxv64i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
+declare <vscale x 32 x i32> @llvm.experimental.vector.interleave2.nxv32i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
+declare <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
+
 ; Floats
 
 define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
@@ -193,3 +324,90 @@
 declare <vscale x 16 x half> @llvm.experimental.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 8 x float> @llvm.experimental.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
+
+define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
+; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v8, v24, v16
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
+; CHECK-NEXT:    vwaddu.vv v0, v28, v20
+; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
+; CHECK-NEXT:    vmv8r.v v16, v0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
+  ret <vscale x 64 x half> %res
+}
+
+define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
+; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v8, v24, v16
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
+; CHECK-NEXT:    vwaddu.vv v0, v28, v20
+; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
+; CHECK-NEXT:    vmv8r.v v16, v0
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
+  ret <vscale x 32 x float> %res
+}
+
+define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
+; CHECK-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vid.v v24
+; CHECK-NEXT:    vand.vi v26, v24, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    vsrl.vi v2, v24, 1
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vx v2, v2, a0, v0.t
+; CHECK-NEXT:    vmv4r.v v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v2, v0.t
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vrgatherei16.vv v24, v16, v2, v0.t
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv.v.v v16, v24
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
+  ret <vscale x 16 x double> %res
+}
+
+declare <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half>, <vscale x 32 x half>)
+declare <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float>, <vscale x 16 x float>)
+declare <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double>, <vscale x 8 x double>)
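
Note (not part of the patch): the sketch below is a standalone C++ model of the split-and-reassemble step in the RISCVISelLowering.cpp hunk above. ISD::VECTOR_INTERLEAVE produces two results which, concatenated, give the zipped sequence {A[0], B[0], A[1], B[1], ...}; the existing lowering below the new block builds a vector twice as wide, which, as the added comment suggests, is not available once the operands are already LMUL=8. The model uses plain std::vector<int> as a stand-in for RVV register groups and checks that interleaving the low and high operand halves separately, then concatenating each pair of partial results, matches a single full-width interleave. Names such as interleave/concat/Op0Lo are illustrative only.

// Standalone sketch: scalar model of the LMUL=8 split-and-reassemble lowering.
// Build with any C++17 compiler, e.g. `c++ -std=c++17 interleave_model.cpp`.
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

using Vec = std::vector<int>;

// Two-result interleave, mirroring ISD::VECTOR_INTERLEAVE: the results are the
// low and high halves of the zipped sequence {A[0], B[0], A[1], B[1], ...}.
static std::pair<Vec, Vec> interleave(const Vec &A, const Vec &B) {
  Vec Zip;
  for (std::size_t I = 0; I < A.size(); ++I) {
    Zip.push_back(A[I]);
    Zip.push_back(B[I]);
  }
  return {Vec(Zip.begin(), Zip.begin() + A.size()),
          Vec(Zip.begin() + A.size(), Zip.end())};
}

// Scalar stand-in for ISD::CONCAT_VECTORS.
static Vec concat(const Vec &Lo, const Vec &Hi) {
  Vec R = Lo;
  R.insert(R.end(), Hi.begin(), Hi.end());
  return R;
}

int main() {
  // Stand-ins for two LMUL=8 operands; any even length works.
  Vec Op0 = {0, 1, 2, 3, 4, 5, 6, 7};
  Vec Op1 = {10, 11, 12, 13, 14, 15, 16, 17};

  // Reference: interleave the full-width operands directly.
  auto [RefLo, RefHi] = interleave(Op0, Op1);

  // New code path: split each operand in half, interleave the low halves and
  // the high halves independently, then concatenate each pair of results.
  std::size_t Half = Op0.size() / 2;
  Vec Op0Lo(Op0.begin(), Op0.begin() + Half), Op0Hi(Op0.begin() + Half, Op0.end());
  Vec Op1Lo(Op1.begin(), Op1.begin() + Half), Op1Hi(Op1.begin() + Half, Op1.end());
  auto [ResLo0, ResLo1] = interleave(Op0Lo, Op1Lo);
  auto [ResHi0, ResHi1] = interleave(Op0Hi, Op1Hi);

  // The reassembled halves match the full-width interleave results.
  assert(concat(ResLo0, ResLo1) == RefLo);
  assert(concat(ResHi0, ResHi1) == RefHi);
  return 0;
}

The same identity is what the nxv16i64/nxv16f64 tests above exercise end to end: each half stays within an LMUL=8 register group, so no wider type ever needs to be materialized.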