diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -740,9 +740,9 @@
   /// returns the address of that location. Otherwise, returns nullptr.
   Value *getIRStackGuard(IRBuilderBase &IRB) const override;
 
-  /// Returns whether or not generating a fixed length interleaved load/store
-  /// intrinsic for this type will be legal.
-  bool isLegalInterleavedAccessType(FixedVectorType *, unsigned Factor,
+  /// Returns whether or not generating an interleaved load/store intrinsic
+  /// for this type will be legal.
+  bool isLegalInterleavedAccessType(VectorType *, unsigned Factor,
                                     const DataLayout &) const;
 
   /// Return true if a stride load store of the given result type and
@@ -759,6 +759,9 @@
   bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                              unsigned Factor) const override;
 
+  bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
+                                       StoreInst *SI) const override;
+
   bool supportKCFIBundles() const override { return true; }
 
   MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16671,24 +16671,29 @@
 }
 
 bool RISCVTargetLowering::isLegalInterleavedAccessType(
-    FixedVectorType *VTy, unsigned Factor, const DataLayout &DL) const {
-  if (!Subtarget.useRVVForFixedLengthVectors())
-    return false;
+    VectorType *VTy, unsigned Factor, const DataLayout &DL) const {
   EVT VT = getValueType(DL, VTy);
-  // Don't lower vlseg/vsseg for fixed length vector types that can't be split.
+  // Don't lower vlseg/vsseg for vector types that can't be split.
   if (!isTypeLegal(VT))
     return false;
 
   if (!isLegalElementTypeForRVV(VT.getScalarType()))
     return false;
 
-  // Sometimes the interleaved access pass picks up splats as interleaves of one
-  // element. Don't lower these.
-  if (VTy->getNumElements() < 2)
-    return false;
+  MVT ContainerVT = VT.getSimpleVT();
+
+  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
+    if (!Subtarget.useRVVForFixedLengthVectors())
+      return false;
+    // Sometimes the interleaved access pass picks up splats as interleaves of
+    // one element. Don't lower these.
+    if (FVTy->getNumElements() < 2)
+      return false;
+
+    ContainerVT = getContainerForFixedLengthVector(VT.getSimpleVT());
+  }
 
   // Need to make sure that EMUL * NFIELDS ≤ 8
-  MVT ContainerVT = getContainerForFixedLengthVector(VT.getSimpleVT());
   auto [LMUL, Fractional] = RISCVVType::decodeVLMUL(getLMUL(ContainerVT));
   if (Fractional)
     return true;
@@ -16761,6 +16766,12 @@
   return true;
 }
 
+static const Intrinsic::ID FixedVssegIntrIds[] = {
+    Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
+    Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
+    Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
+    Intrinsic::riscv_seg8_store};
+
 /// Lower an interleaved store into a vssegN intrinsic.
 ///
 /// E.g. Lower an interleaved store (Factor = 3):
@@ -16791,14 +16802,8 @@
 
   auto *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
 
-  static const Intrinsic::ID FixedLenIntrIds[] = {
-      Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
-      Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
-      Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
-      Intrinsic::riscv_seg8_store};
-
   Function *VssegNFunc =
-      Intrinsic::getDeclaration(SI->getModule(), FixedLenIntrIds[Factor - 2],
+      Intrinsic::getDeclaration(SI->getModule(), FixedVssegIntrIds[Factor - 2],
                                 {VTy, SI->getPointerOperandType(), XLenTy});
 
   auto Mask = SVI->getShuffleMask();
@@ -16821,6 +16826,51 @@
   return true;
 }
 
+bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
+                                                          StoreInst *SI) const {
+  assert(SI->isSimple());
+  IRBuilder<> Builder(SI);
+
+  // Only interleave2 supported at present.
+  if (II->getIntrinsicID() != Intrinsic::experimental_vector_interleave2)
+    return false;
+
+  unsigned Factor = 2;
+
+  VectorType *VTy = cast<VectorType>(II->getType());
+  VectorType *InVTy = cast<VectorType>(II->getOperand(0)->getType());
+
+  if (!isLegalInterleavedAccessType(InVTy, Factor,
+                                    SI->getModule()->getDataLayout()))
+    return false;
+
+  Function *VssegNFunc;
+  Value *VL;
+  Type *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
+
+  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
+    VssegNFunc = Intrinsic::getDeclaration(
+        SI->getModule(), FixedVssegIntrIds[Factor - 2],
+        {InVTy, SI->getPointerOperandType(), XLenTy});
+    VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
+  } else {
+    static const Intrinsic::ID IntrIds[] = {
+        Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
+        Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
+        Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
+        Intrinsic::riscv_vsseg8};
+
+    VssegNFunc = Intrinsic::getDeclaration(SI->getModule(), IntrIds[Factor - 2],
+                                           {InVTy, XLenTy});
+    VL = Constant::getAllOnesValue(XLenTy);
+  }
+
+  Builder.CreateCall(VssegNFunc, {II->getOperand(0), II->getOperand(1),
+                                  SI->getPointerOperand(), VL});
+
+  return true;
+}
+
 MachineInstr *
 RISCVTargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
                                    MachineBasicBlock::instr_iterator &MBBI,
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-interleave-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-interleave-store.ll
@@ -32,11 +32,7 @@
 ; CHECK-LABEL: vector_interleave_store_v16i16_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vse16.v v10, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16> %a, <8 x i16> %b)
   store <16 x i16> %res, ptr %p
@@ -47,11 +43,7 @@
 ; CHECK-LABEL: vector_interleave_store_v8i32_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    vsseg2e32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <8 x i32> @llvm.experimental.vector.interleave2.v8i32(<4 x i32> %a, <4 x i32> %b)
   store <8 x i32> %res, ptr %p
@@ -59,29 +51,11 @@
 }
 
 define void @vector_interleave_store_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b, ptr %p) {
-; RV32-LABEL: vector_interleave_store_v4i64_v2i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv1r.v v10, v9
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    lui a1, %hi(.LCPI3_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI3_0)
-; RV32-NEXT:    vle16.v v12, (a1)
-; RV32-NEXT:    vslideup.vi v8, v10, 2
-; RV32-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-NEXT:    vse64.v v10, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vector_interleave_store_v4i64_v2i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv1r.v v10, v9
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    lui a1, %hi(.LCPI3_0)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI3_0)
-; RV64-NEXT:    vle64.v v12, (a1)
-; RV64-NEXT:    vslideup.vi v8, v10, 2
-; RV64-NEXT:    vrgather.vv v10, v8, v12
-; RV64-NEXT:    vse64.v v10, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: vector_interleave_store_v4i64_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vsseg2e64.v v8, (a0)
+; CHECK-NEXT:    ret
   %res = call <4 x i64> @llvm.experimental.vector.interleave2.v4i64(<2 x i64> %a, <2 x i64> %b)
   store <4 x i64> %res, ptr %p
   ret void
@@ -98,11 +72,7 @@
 ; CHECK-LABEL: vector_interleave_store_v4f16_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vse16.v v10, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.experimental.vector.interleave2.v4f16(<2 x half> %a, <2 x half> %b)
   store <4 x half> %res, ptr %p
@@ -113,11 +83,7 @@
 ; CHECK-LABEL: vector_interleave_store_v8f16_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vse16.v v10, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.experimental.vector.interleave2.v8f16(<4 x half> %a, <4 x half> %b)
   store <8 x half> %res, ptr %p
@@ -128,11 +94,7 @@
 ; CHECK-LABEL: vector_interleave_store_v4f32_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    vsseg2e32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.experimental.vector.interleave2.v4f32(<2 x float> %a, <2 x float> %b)
   store <4 x float> %res, ptr %p
@@ -143,11 +105,7 @@
 ; CHECK-LABEL: vector_interleave_store_v16f16_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vse16.v v10, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <16 x half> @llvm.experimental.vector.interleave2.v16f16(<8 x half> %a, <8 x half> %b)
   store <16 x half> %res, ptr %p
@@ -158,11 +116,7 @@
 ; CHECK-LABEL: vector_interleave_store_v8f32_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    vsseg2e32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float> %a, <4 x float> %b)
   store <8 x float> %res, ptr %p
@@ -170,29 +124,11 @@
 }
 
 define void @vector_interleave_store_v4f64_v2f64(<2 x double> %a, <2 x double> %b, ptr %p) {
-; RV32-LABEL: vector_interleave_store_v4f64_v2f64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    vmv1r.v v10, v9
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI9_0)
-; RV32-NEXT:    vle16.v v12, (a1)
-; RV32-NEXT:    vslideup.vi v8, v10, 2
-; RV32-NEXT:    vrgatherei16.vv v10, v8, v12
-; RV32-NEXT:    vse64.v v10, (a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vector_interleave_store_v4f64_v2f64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vmv1r.v v10, v9
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI9_0)
-; RV64-NEXT:    vle64.v v12, (a1)
-; RV64-NEXT:    vslideup.vi v8, v10, 2
-; RV64-NEXT:    vrgather.vv v10, v8, v12
-; RV64-NEXT:    vse64.v v10, (a0)
-; RV64-NEXT:    ret
+; CHECK-LABEL: vector_interleave_store_v4f64_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vsseg2e64.v v8, (a0)
+; CHECK-NEXT:    ret
   %res = call <4 x double> @llvm.experimental.vector.interleave2.v4f64(<2 x double> %a, <2 x double> %b)
   store <4 x double> %res, ptr %p
   ret void
@@ -205,3 +141,6 @@
 declare <16 x half> @llvm.experimental.vector.interleave2.v16f16(<8 x half>, <8 x half>)
 declare <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float>, <4 x float>)
 declare <4 x double> @llvm.experimental.vector.interleave2.v4f64(<2 x double>, <2 x double>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -36,10 +36,7 @@
 ; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v12, v8, v10
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v12, a1, v10
-; CHECK-NEXT:    vs4r.v v12, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
   store <vscale x 16 x i16> %res, ptr %p
@@ -50,10 +47,7 @@
 ; CHECK-LABEL: vector_interleave_store_nxv8i32_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v12, v8, v10
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v12, a1, v10
-; CHECK-NEXT:    vs4r.v v12, (a0)
+; CHECK-NEXT:    vsseg2e32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
   store <vscale x 8 x i32> %res, ptr %p
@@ -63,17 +57,8 @@
 define void @vector_interleave_store_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_nxv4i64_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vid.v v12
-; CHECK-NEXT:    vsrl.vi v13, v12, 1
-; CHECK-NEXT:    vand.vi v12, v12, 1
-; CHECK-NEXT:    vmsne.vi v0, v12, 0
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a1, a1, 2
-; CHECK-NEXT:    vadd.vx v13, v13, a1, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v16, v8, v13
-; CHECK-NEXT:    vs4r.v v16, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsseg2e64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.experimental.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
   store <vscale x 4 x i64> %res, ptr %p
@@ -83,17 +68,8 @@
 define void @vector_interleave_store_nxv8i64_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_nxv8i64_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vid.v v16
-; CHECK-NEXT:    vand.vi v18, v16, 1
-; CHECK-NEXT:    vmsne.vi v0, v18, 0
-; CHECK-NEXT:    vsrl.vi v16, v16, 1
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    vadd.vx v16, v16, a1, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
-; CHECK-NEXT:    vs8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsseg2e64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.experimental.vector.interleave2.nxv8i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
   store <vscale x 8 x i64> %res, ptr %p
@@ -169,17 +145,7 @@
 ; CHECK-LABEL: vector_interleave_store_nxv4f16_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a1, a1, 2
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v10, a1
-; CHECK-NEXT:    add a2, a1, a1
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, tu, ma
-; CHECK-NEXT:    vslideup.vx v10, v8, a1
-; CHECK-NEXT:    vs1r.v v10, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.experimental.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
   store <vscale x 4 x half> %res, ptr %p
@@ -190,10 +156,7 @@
 ; CHECK-LABEL: vector_interleave_store_nxv8f16_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vs2r.v v10, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.experimental.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
   store <vscale x 8 x half> %res, ptr %p
@@ -204,10 +167,7 @@
 ; CHECK-LABEL: vector_interleave_store_nxv4f32_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
-; CHECK-NEXT:    vs2r.v v10, (a0)
+; CHECK-NEXT:    vsseg2e32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.experimental.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
   store <vscale x 4 x float> %res, ptr %p
@@ -218,10 +178,7 @@
 ; CHECK-LABEL: vector_interleave_store_nxv16f16_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v12, v8, v10
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v12, a1, v10
-; CHECK-NEXT:    vs4r.v v12, (a0)
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.experimental.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
   store <vscale x 16 x half> %res, ptr %p
@@ -232,10 +189,7 @@
 ; CHECK-LABEL: vector_interleave_store_nxv8f32_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v12, v8, v10
-; CHECK-NEXT:    li a1, -1
-; CHECK-NEXT:    vwmaccu.vx v12, a1, v10
-; CHECK-NEXT:    vs4r.v v12, (a0)
+; CHECK-NEXT:    vsseg2e32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.experimental.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
   store <vscale x 8 x float> %res, ptr %p
@@ -245,17 +199,8 @@
 }
 
 define void @vector_interleave_store_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_nxv4f64_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vid.v v12
-; CHECK-NEXT:    vsrl.vi v13, v12, 1
-; CHECK-NEXT:    vand.vi v12, v12, 1
-; CHECK-NEXT:    vmsne.vi v0, v12, 0
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a1, a1, 2
-; CHECK-NEXT:    vadd.vx v13, v13, a1, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v16, v8, v13
-; CHECK-NEXT:    vs4r.v v16, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsseg2e64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
   store <vscale x 4 x double> %res, ptr %p
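
Note (illustration, not part of the patch): the new lowerInterleaveIntrinsicToStore hook fires when the InterleavedAccess pass sees a vector.interleave2 whose result feeds a simple store. A minimal sketch of the IR-level rewrite, with the intrinsic name mangling approximate:

  ; before
  %v = call <vscale x 4 x i64> @llvm.experimental.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  store <vscale x 4 x i64> %v, ptr %p

  ; after (scalable path: riscv_vsseg2 with VL = all-ones, i.e. VLMAX)
  call void @llvm.riscv.vsseg2.nxv2i64.i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, ptr %p, i64 -1)

This is the rewrite the vector-interleave-store.ll updates above check for: the vrgather/vslide shuffle sequences are replaced by a single vsseg2 segment store. For fixed-length types the hook instead selects riscv_seg2_store from FixedVssegIntrIds and passes a constant VL taken from the interleaved result's element count.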