diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-deinterleave-load.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-deinterleave-load.ll
@@ -0,0 +1,241 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh | FileCheck %s -check-prefixes=CHECK,RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh | FileCheck %s -check-prefixes=CHECK,RV64
+
+; Integers
+
+define {<16 x i1>, <16 x i1>} @vector_deinterleave_load_v16i1_v32i1(ptr %p) {
+; RV32-LABEL: vector_deinterleave_load_v16i1_v32i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT:    vlm.v v8, (a0)
+; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT:    vslidedown.vi v0, v8, 2
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT:    vmv.v.i v9, 0
+; RV32-NEXT:    vmerge.vim v10, v9, 1, v0
+; RV32-NEXT:    vmv1r.v v0, v8
+; RV32-NEXT:    vmerge.vim v8, v9, 1, v0
+; RV32-NEXT:    vid.v v9
+; RV32-NEXT:    vadd.vv v11, v9, v9
+; RV32-NEXT:    vrgather.vv v9, v8, v11
+; RV32-NEXT:    vadd.vi v12, v11, -16
+; RV32-NEXT:    lui a0, 16
+; RV32-NEXT:    addi a0, a0, -256
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vmv.v.x v0, a0
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-NEXT:    vrgather.vv v9, v10, v12, v0.t
+; RV32-NEXT:    vmsne.vi v9, v9, 0
+; RV32-NEXT:    vadd.vi v12, v11, 1
+; RV32-NEXT:    vrgather.vv v13, v8, v12
+; RV32-NEXT:    vadd.vi v8, v11, -15
+; RV32-NEXT:    vrgather.vv v13, v10, v8, v0.t
+; RV32-NEXT:    vmsne.vi v8, v13, 0
+; RV32-NEXT:    vmv.v.v v0, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_deinterleave_load_v16i1_v32i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT:    vlm.v v8, (a0)
+; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT:    vslidedown.vi v0, v8, 2
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT:    vmv.v.i v9, 0
+; RV64-NEXT:    vmerge.vim v10, v9, 1, v0
+; RV64-NEXT:    vmv1r.v v0, v8
+; RV64-NEXT:    vmerge.vim v8, v9, 1, v0
+; RV64-NEXT:    vid.v v9
+; RV64-NEXT:    vadd.vv v11, v9, v9
+; RV64-NEXT:    vrgather.vv v9, v8, v11
+; RV64-NEXT:    vadd.vi v12, v11, -16
+; RV64-NEXT:    lui a0, 16
+; RV64-NEXT:    addiw a0, a0, -256
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vmv.v.x v0, a0
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-NEXT:    vrgather.vv v9, v10, v12, v0.t
+; RV64-NEXT:    vmsne.vi v9, v9, 0
+; RV64-NEXT:    vadd.vi v12, v11, 1
+; RV64-NEXT:    vrgather.vv v13, v8, v12
+; RV64-NEXT:    vadd.vi v8, v11, -15
+; RV64-NEXT:    vrgather.vv v13, v10, v8, v0.t
+; RV64-NEXT:    vmsne.vi v8, v13, 0
+; RV64-NEXT:    vmv.v.v v0, v9
+; RV64-NEXT:    ret
+  %vec = load <32 x i1>, ptr %p
+  %retval = call {<16 x i1>, <16 x i1>} @llvm.experimental.vector.deinterleave2.v32i1(<32 x i1> %vec)
+  ret {<16 x i1>, <16 x i1>} %retval
+}
+
+define {<16 x i8>, <16 x i8>} @vector_deinterleave_load_v16i8_v32i8(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v16i8_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wi v9, v10, 8
+; CHECK-NEXT:    ret
+  %vec = load <32 x i8>, ptr %p
+  %retval = call {<16 x i8>, <16 x i8>} @llvm.experimental.vector.deinterleave2.v32i8(<32 x i8> %vec)
+  ret {<16 x i8>, <16 x i8>} %retval
+}
+
+define {<8 x i16>, <8 x i16>} @vector_deinterleave_load_v8i16_v16i16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v8i16_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wi v9, v10, 16
+; CHECK-NEXT:    ret
+  %vec = load <16 x i16>, ptr %p
+  %retval = call {<8 x i16>, <8 x i16>} @llvm.experimental.vector.deinterleave2.v16i16(<16 x i16> %vec)
+  ret {<8 x i16>, <8 x i16>} %retval
+}
+
+define {<4 x i32>, <4 x i32>} @vector_deinterleave_load_v4i32_vv8i32(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v4i32_vv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wx v9, v10, a0
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %vec = load <8 x i32>, ptr %p
+  %retval = call {<4 x i32>, <4 x i32>} @llvm.experimental.vector.deinterleave2.v8i32(<8 x i32> %vec)
+  ret {<4 x i32>, <4 x i32>} %retval
+}
+
+define {<2 x i64>, <2 x i64>} @vector_deinterleave_load_v2i64_v4i64(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v2i64_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 2, e64, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 2
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.i v0, 2
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vrgather.vi v9, v10, 1, v0.t
+; CHECK-NEXT:    vslideup.vi v8, v10, 1
+; CHECK-NEXT:    ret
+  %vec = load <4 x i64>, ptr %p
+  %retval = call {<2 x i64>, <2 x i64>} @llvm.experimental.vector.deinterleave2.v4i64(<4 x i64> %vec)
+  ret {<2 x i64>, <2 x i64>} %retval
+}
+
+declare {<16 x i1>, <16 x i1>} @llvm.experimental.vector.deinterleave2.v32i1(<32 x i1>)
+declare {<16 x i8>, <16 x i8>} @llvm.experimental.vector.deinterleave2.v32i8(<32 x i8>)
+declare {<8 x i16>, <8 x i16>} @llvm.experimental.vector.deinterleave2.v16i16(<16 x i16>)
+declare {<4 x i32>, <4 x i32>} @llvm.experimental.vector.deinterleave2.v8i32(<8 x i32>)
+declare {<2 x i64>, <2 x i64>} @llvm.experimental.vector.deinterleave2.v4i64(<4 x i64>)
+
+; Floats
+
+define {<2 x half>, <2 x half>} @vector_deinterleave_load_v2f16_v4f16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v2f16_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wi v9, v9, 16
+; CHECK-NEXT:    ret
+  %vec = load <4 x half>, ptr %p
+  %retval = call {<2 x half>, <2 x half>} @llvm.experimental.vector.deinterleave2.v4f16(<4 x half> %vec)
+  ret {<2 x half>, <2 x half>} %retval
+}
+
+define {<4 x half>, <4 x half>} @vector_deinterleave_load_v4f16_v8f16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v4f16_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wi v9, v9, 16
+; CHECK-NEXT:    ret
+  %vec = load <8 x half>, ptr %p
+  %retval = call {<4 x half>, <4 x half>} @llvm.experimental.vector.deinterleave2.v8f16(<8 x half> %vec)
+  ret {<4 x half>, <4 x half>} %retval
+}
+
+define {<2 x float>, <2 x float>} @vector_deinterleave_load_v2f32_v4f32(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v2f32_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wx v9, v8, a0
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %vec = load <4 x float>, ptr %p
+  %retval = call {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32(<4 x float> %vec)
+  ret {<2 x float>, <2 x float>} %retval
+}
+
+define {<8 x half>, <8 x half>} @vector_deinterleave_load_v8f16_v16f16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v8f16_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wi v9, v10, 16
+; CHECK-NEXT:    ret
+  %vec = load <16 x half>, ptr %p
+  %retval = call {<8 x half>, <8 x half>} @llvm.experimental.vector.deinterleave2.v16f16(<16 x half> %vec)
+  ret {<8 x half>, <8 x half>} %retval
+}
+
+define {<4 x float>, <4 x float>} @vector_deinterleave_load_v4f32_v8f32(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v4f32_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wx v9, v10, a0
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %vec = load <8 x float>, ptr %p
+  %retval = call {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float> %vec)
+  ret {<4 x float>, <4 x float>} %retval
+}
+
+define {<2 x double>, <2 x double>} @vector_deinterleave_load_v2f64_v4f64(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v2f64_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 2, e64, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 2
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.i v0, 2
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vrgather.vi v9, v10, 1, v0.t
+; CHECK-NEXT:    vslideup.vi v8, v10, 1
+; CHECK-NEXT:    ret
+  %vec = load <4 x double>, ptr %p
+  %retval = call {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double> %vec)
+  ret {<2 x double>, <2 x double>} %retval
+}
+
+declare {<2 x half>, <2 x half>} @llvm.experimental.vector.deinterleave2.v4f16(<4 x half>)
+declare {<4 x half>, <4 x half>} @llvm.experimental.vector.deinterleave2.v8f16(<8 x half>)
+declare {<2 x float>, <2 x float>} @llvm.experimental.vector.deinterleave2.v4f32(<4 x float>)
+declare {<8 x half>, <8 x half>} @llvm.experimental.vector.deinterleave2.v16f16(<16 x half>)
+declare {<4 x float>, <4 x float>} @llvm.experimental.vector.deinterleave2.v8f32(<8 x float>)
+declare {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -0,0 +1,265 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh,+m | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh,+m | FileCheck %s
+
+; Integers
+
+define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_load_nxv16i1_nxv32i1(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv16i1_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vlm.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v8, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vmerge.vim v14, v10, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    vnsrl.wi v10, v12, 8
+; CHECK-NEXT:    vmsne.vi v8, v10, 0
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 32 x i1>, ptr %p
+  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
+  ret {<vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_load_nxv16i8_nxv32i8(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv16i8_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4r.v v12, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wi v10, v12, 8
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 32 x i8>, ptr %p
+  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.experimental.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %vec)
+  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re16.v v12, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wi v10, v12, 16
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 16 x i16>, ptr %p
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_load_nxv4i32_nxvv8i32(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv4i32_nxvv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re32.v v12, (a0)
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wx v10, v12, a0
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 8 x i32>, ptr %p
+  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %vec)
+  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+}
+
+define {<vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_load_nxv2i64_nxv4i64(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv2i64_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v12, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vadd.vv v16, v8, v8
+; CHECK-NEXT:    vrgather.vv v8, v12, v16
+; CHECK-NEXT:    vadd.vi v16, v16, 1
+; CHECK-NEXT:    vrgather.vv v20, v12, v16
+; CHECK-NEXT:    vmv2r.v v10, v20
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 4 x i64>, ptr %p
+  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.experimental.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> %vec)
+  ret {<vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+}
+
+define {<vscale x 4 x i64>, <vscale x 4 x i64>} @vector_deinterleave_load_nxv4i64_nxv8i64(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv4i64_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v16, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vadd.vv v24, v8, v8
+; CHECK-NEXT:    vrgather.vv v8, v16, v24
+; CHECK-NEXT:    vadd.vi v24, v24, 1
+; CHECK-NEXT:    vrgather.vv v0, v16, v24
+; CHECK-NEXT:    vmv4r.v v12, v0
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 8 x i64>, ptr %p
+  %retval = call {<vscale x 4 x i64>, <vscale x 4 x i64>} @llvm.experimental.vector.deinterleave2.nxv8i64(<vscale x 8 x i64> %vec)
+  ret {<vscale x 4 x i64>, <vscale x 4 x i64>} %retval
+}
+
+; This shouldn't be lowered to a vlseg because EMUL * NFIELDS >= 8
+define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i64_nxv16i64(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv8i64_nxv16i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a2, 24
+; CHECK-NEXT:    mul a1, a1, a2
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, a0, a1
+; CHECK-NEXT:    vl8re64.v v16, (a1)
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vadd.vv v0, v8, v8
+; CHECK-NEXT:    vrgather.vv v8, v24, v0
+; CHECK-NEXT:    vrgather.vv v24, v16, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vadd.vi v0, v0, 1
+; CHECK-NEXT:    vrgather.vv v24, v16, v0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vrgather.vv v16, v24, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv4r.v v12, v24
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv4r.v v20, v24
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 24
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 16 x i64>, ptr %p
+  %retval = call {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.experimental.vector.deinterleave2.nxv16i64(<vscale x 16 x i64> %vec)
+  ret {<vscale x 8 x i64>, <vscale x 8 x i64>} %retval
+}
+
+declare {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1>)
+declare {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.experimental.vector.deinterleave2.nxv32i8(<vscale x 32 x i8>)
+declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16>)
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32>)
+declare {<vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.experimental.vector.deinterleave2.nxv4i64(<vscale x 4 x i64>)
+declare {<vscale x 4 x i64>, <vscale x 4 x i64>} @llvm.experimental.vector.deinterleave2.nxv8i64(<vscale x 8 x i64>)
+declare {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.experimental.vector.deinterleave2.nxv16i64(<vscale x 16 x i64>)
+
+; Floats
+
+define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_load_nxv2f16_nxv4f16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1re16.v v9, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v9, 0
+; CHECK-NEXT:    vnsrl.wi v9, v9, 16
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 4 x half>, ptr %p
+  %retval = call {<vscale x 2 x half>, <vscale x 2 x half>} @llvm.experimental.vector.deinterleave2.nxv4f16(<vscale x 4 x half> %vec)
+  ret {<vscale x 2 x half>, <vscale x 2 x half>} %retval
+}
+
+define {<vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_load_nxv4f16_nxv8f16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv4f16_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re16.v v10, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wi v9, v10, 16
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 8 x half>, ptr %p
+  %retval = call {<vscale x 4 x half>, <vscale x 4 x half>} @llvm.experimental.vector.deinterleave2.nxv8f16(<vscale x 8 x half> %vec)
+  ret {<vscale x 4 x half>, <vscale x 4 x half>} %retval
+}
+
+define {<vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_load_nxv2f32_nxv4f32(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv2f32_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re32.v v10, (a0)
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wx v9, v10, a0
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 4 x float>, ptr %p
+  %retval = call {<vscale x 2 x float>, <vscale x 2 x float>} @llvm.experimental.vector.deinterleave2.nxv4f32(<vscale x 4 x float> %vec)
+  ret {<vscale x 2 x float>, <vscale x 2 x float>} %retval
+}
+
+define {<vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_load_nxv8f16_nxv16f16(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv8f16_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re16.v v12, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vnsrl.wi v10, v12, 16
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 16 x half>, ptr %p
+  %retval = call {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.experimental.vector.deinterleave2.nxv16f16(<vscale x 16 x half> %vec)
+  ret {<vscale x 8 x half>, <vscale x 8 x half>} %retval
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_load_nxv4f32_nxv8f32(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv4f32_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re32.v v12, (a0)
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wx v10, v12, a0
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 8 x float>, ptr %p
+  %retval = call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.experimental.vector.deinterleave2.nxv8f32(<vscale x 8 x float> %vec)
+  ret {<vscale x 4 x float>, <vscale x 4 x float>} %retval
+}
+
+define {<vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_load_nxv2f64_nxv4f64(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv2f64_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v12, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vadd.vv v16, v8, v8
+; CHECK-NEXT:    vrgather.vv v8, v12, v16
+; CHECK-NEXT:    vadd.vi v16, v16, 1
+; CHECK-NEXT:    vrgather.vv v20, v12, v16
+; CHECK-NEXT:    vmv2r.v v10, v20
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 4 x double>, ptr %p
+  %retval = call {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %vec)
+  ret {<vscale x 2 x double>, <vscale x 2 x double>} %retval
+}
+
+declare {<vscale x 2 x half>, <vscale x 2 x half>} @llvm.experimental.vector.deinterleave2.nxv4f16(<vscale x 4 x half>)
+declare {<vscale x 4 x half>, <vscale x 4 x half>} @llvm.experimental.vector.deinterleave2.nxv8f16(<vscale x 8 x half>)
+declare {<vscale x 2 x float>, <vscale x 2 x float>} @llvm.experimental.vector.deinterleave2.nxv4f32(<vscale x 4 x float>)
+declare {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.experimental.vector.deinterleave2.nxv16f16(<vscale x 16 x half>)
+declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.experimental.vector.deinterleave2.nxv8f32(<vscale x 8 x float>)
+declare {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double>)