diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
@@ -82,6 +82,18 @@
   ret {<16 x i8>, <16 x i8>} %retval
 }
 
+; FIXME: Shouldn't be lowered to vlseg because it's unaligned
+define {<8 x i16>, <8 x i16>} @vector_deinterleave_load_v8i16_v16i16_align1(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v8i16_v16i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vec = load <16 x i16>, ptr %p, align 1
+  %retval = call {<8 x i16>, <8 x i16>} @llvm.experimental.vector.deinterleave2.v16i16(<16 x i16> %vec)
+  ret {<8 x i16>, <8 x i16>} %retval
+}
+
 define {<8 x i16>, <8 x i16>} @vector_deinterleave_load_v8i16_v16i16(ptr %p) {
 ; CHECK-LABEL: vector_deinterleave_load_v8i16_v16i16:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
@@ -29,6 +29,18 @@
   ret void
 }
 
+; FIXME: Shouldn't be lowered to vsseg because it's unaligned
+define void @vector_interleave_store_v16i16_v8i16_align1(<8 x i16> %a, <8 x i16> %b, ptr %p) {
+; CHECK-LABEL: vector_interleave_store_v16i16_v8i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16> %a, <8 x i16> %b)
+  store <16 x i16> %res, ptr %p, align 1
+  ret void
+}
+
 define void @vector_interleave_store_v16i16_v8i16(<8 x i16> %a, <8 x i16> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_v16i16_v8i16:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -39,6 +39,18 @@
   ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %retval
 }
 
+; FIXME: Shouldn't be lowered to vlseg because it's unaligned
+define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16_align1(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vlseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 16 x i16>, ptr %p, align 1
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
 define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16(ptr %p) {
 ; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -32,6 +32,18 @@
   ret void
 }
 
+; FIXME: Shouldn't be lowered to vsseg because it's unaligned
+define void @vector_interleave_store_nxv16i16_nxv8i16_align1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
+; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  store <vscale x 16 x i16> %res, ptr %p, align 1
+  ret void
+}
+
 define void @vector_interleave_store_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16:
 ; CHECK:       # %bb.0:
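All four FIXMEs mark the same issue: the lowering that turns `vector.deinterleave2` of a load into `vlseg2e16.v` (and `vector.interleave2` plus a store into `vsseg2e16.v`) fires without checking the access alignment. The segment instructions operate on e16 elements and so need 2-byte alignment, while the `align 1` load/store in these tests only guarantees byte alignment; an in-tree fix would likely consult an alignment-legality hook such as `TargetLowering::allowsMemoryAccessForAlignment` before forming the segmented access. Below is a minimal standalone sketch of the underlying rule, not the actual LLVM change; the helper name is hypothetical.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Sketch of the legality check the FIXMEs call for: an RVV segment
// load/store with element width EEW (in bits) accesses individual
// elements, so it needs the natural element alignment. A plain
// byte-wise lowering of the same access does not.
bool canFormSegmentedAccess(unsigned EEWBits, uint64_t AlignBytes) {
  assert(EEWBits % 8 == 0 && "element width must be whole bytes");
  return AlignBytes >= EEWBits / 8;
}

int main() {
  // The new *_align1 tests load/store i16 vectors with `align 1`,
  // so vlseg2e16/vsseg2e16 must not be formed:
  std::printf("e16, align 1: %d\n", canFormSegmentedAccess(16, 1)); // 0
  // The pre-existing tests use the natural alignment, so they may be:
  std::printf("e16, align 2: %d\n", canFormSegmentedAccess(16, 2)); // 1
  return 0;
}
```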