diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -536,6 +536,7 @@
         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
 
         setOperationAction(ISD::LOAD, VT, Custom);
         setOperationAction(ISD::STORE, VT, Custom);
@@ -1494,6 +1495,20 @@
     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
   case ISD::VECTOR_SHUFFLE:
     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
+  case ISD::CONCAT_VECTORS: {
+    // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
+    // better than going through the stack, as the default expansion does.
+    SDLoc DL(Op);
+    MVT VT = Op.getSimpleValueType();
+    assert(VT.isFixedLengthVector() && "Unexpected CONCAT_VECTORS lowering");
+    unsigned NumOpElts =
+        Op.getOperand(0).getSimpleValueType().getVectorNumElements();
+    SDValue Vec = DAG.getUNDEF(VT);
+    for (const auto &OpIdx : enumerate(Op->ops()))
+      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
+                        DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
+    return Vec;
+  }
   case ISD::LOAD:
     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
   case ISD::STORE:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -295,15 +295,13 @@
 ;
 ; LMULMAX1-LABEL: fp2si_v8f64_v8i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi sp, sp, -16
-; LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
-; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; LMULMAX1-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vle64.v v25, (a2)
+; LMULMAX1-NEXT:    vle64.v v26, (a0)
 ; LMULMAX1-NEXT:    addi a2, a0, 32
-; LMULMAX1-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    vle64.v v27, (a2)
-; LMULMAX1-NEXT:    addi a0, a0, 16
+; LMULMAX1-NEXT:    addi a0, a0, 48
 ; LMULMAX1-NEXT:    vle64.v v28, (a0)
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v29, v27
@@ -311,49 +309,43 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v27, v29, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v29, v27, 0
-; LMULMAX1-NEXT:    addi a0, sp, 6
-; LMULMAX1-NEXT:    vsetivli a2, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vse8.v v29, (a0)
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.i v27, 0
+; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vmv1r.v v30, v27
+; LMULMAX1-NEXT:    vslideup.vi v30, v29, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
-; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v27, v28
+; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v29, v28
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e16,mf2,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v28, v27, 0
+; LMULMAX1-NEXT:    vnsrl.wi v28, v29, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v27, v28, 0
-; LMULMAX1-NEXT:    addi a0, sp, 2
-; LMULMAX1-NEXT:    vsetivli a2, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vse8.v v27, (a0)
+; LMULMAX1-NEXT:    vnsrl.wi v29, v28, 0
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v30, v29, 2
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
-; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v27, v26
+; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v28, v26
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e16,mf2,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v26, v27, 0
+; LMULMAX1-NEXT:    vnsrl.wi v26, v28, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v27, v26, 0
-; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 4
-; LMULMAX1-NEXT:    vse8.v v27, (a0)
-; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 4
-; LMULMAX1-NEXT:    vle8.v v26, (a0)
-; LMULMAX1-NEXT:    addi a0, sp, 12
-; LMULMAX1-NEXT:    vse8.v v26, (a0)
+; LMULMAX1-NEXT:    vnsrl.wi v28, v26, 0
+; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v27, v28, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v26, v25
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e16,mf2,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v26, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v26, v25, 0
-; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vse8.v v26, (sp)
-; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vle8.v v25, (sp)
-; LMULMAX1-NEXT:    addi a0, sp, 8
-; LMULMAX1-NEXT:    vse8.v v25, (a0)
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v27, v26, 2
+; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.i v25, 0
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v25, v27, 0
+; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v25, v30, 4
 ; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 8
-; LMULMAX1-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
-; LMULMAX1-NEXT:    addi sp, sp, 16
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
   %d = fptosi <8 x double> %a to <8 x i8>
@@ -378,15 +370,13 @@
 ;
 ; LMULMAX1-LABEL: fp2ui_v8f64_v8i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi sp, sp, -16
-; LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
-; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; LMULMAX1-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vle64.v v25, (a2)
+; LMULMAX1-NEXT:    vle64.v v26, (a0)
 ; LMULMAX1-NEXT:    addi a2, a0, 32
-; LMULMAX1-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    vle64.v v27, (a2)
-; LMULMAX1-NEXT:    addi a0, a0, 16
+; LMULMAX1-NEXT:    addi a0, a0, 48
 ; LMULMAX1-NEXT:    vle64.v v28, (a0)
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v29, v27
@@ -394,49 +384,43 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v27, v29, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v29, v27, 0
-; LMULMAX1-NEXT:    addi a0, sp, 6
-; LMULMAX1-NEXT:    vsetivli a2, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vse8.v v29, (a0)
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.i v27, 0
+; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vmv1r.v v30, v27
+; LMULMAX1-NEXT:    vslideup.vi v30, v29, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
-; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v27, v28
+; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v29, v28
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e16,mf2,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v28, v27, 0
+; LMULMAX1-NEXT:    vnsrl.wi v28, v29, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v27, v28, 0
-; LMULMAX1-NEXT:    addi a0, sp, 2
-; LMULMAX1-NEXT:    vsetivli a2, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vse8.v v27, (a0)
+; LMULMAX1-NEXT:    vnsrl.wi v29, v28, 0
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v30, v29, 2
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
-; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v27, v26
+; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v28, v26
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e16,mf2,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v26, v27, 0
+; LMULMAX1-NEXT:    vnsrl.wi v26, v28, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
-; LMULMAX1-NEXT:    vnsrl.wi v27, v26, 0
-; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 4
-; LMULMAX1-NEXT:    vse8.v v27, (a0)
-; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 4
-; LMULMAX1-NEXT:    vle8.v v26, (a0)
-; LMULMAX1-NEXT:    addi a0, sp, 12
-; LMULMAX1-NEXT:    vse8.v v26, (a0)
+; LMULMAX1-NEXT:    vnsrl.wi v28, v26, 0
+; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v27, v28, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e32,mf2,ta,mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v26, v25
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e16,mf2,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v26, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v26, v25, 0
-; LMULMAX1-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vse8.v v26, (sp)
-; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vle8.v v25, (sp)
-; LMULMAX1-NEXT:    addi a0, sp, 8
-; LMULMAX1-NEXT:    vse8.v v25, (a0)
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v27, v26, 2
+; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.i v25, 0
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v25, v27, 0
+; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v25, v30, 4
 ; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 8
-; LMULMAX1-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
-; LMULMAX1-NEXT:    addi sp, sp, 16
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
   %d = fptoui <8 x double> %a to <8 x i8>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -211,31 +211,26 @@
 ;
 ; LMULMAX1-LABEL: trunc_v8i8_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi sp, sp, -16
-; LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
 ; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; LMULMAX1-NEXT:    addi a2, a0, 16
-; LMULMAX1-NEXT:    vle32.v v25, (a2)
+; LMULMAX1-NEXT:    vle32.v v25, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v26, (a0)
 ; LMULMAX1-NEXT:    vsetivli a0, 4, e16,mf2,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v27, v25, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 4, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v27, 0
-; LMULMAX1-NEXT:    addi a0, sp, 12
-; LMULMAX1-NEXT:    vsetivli a2, 4, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vse8.v v25, (a0)
+; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.i v27, 0
+; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v27, v25, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 4, e16,mf2,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v26, 0
 ; LMULMAX1-NEXT:    vsetivli a0, 4, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vnsrl.wi v26, v25, 0
-; LMULMAX1-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 8
-; LMULMAX1-NEXT:    vse8.v v26, (a0)
+; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,tu,mu
+; LMULMAX1-NEXT:    vslideup.vi v27, v26, 4
 ; LMULMAX1-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
-; LMULMAX1-NEXT:    addi a0, sp, 8
-; LMULMAX1-NEXT:    vle8.v v25, (a0)
-; LMULMAX1-NEXT:    vse8.v v25, (a1)
-; LMULMAX1-NEXT:    addi sp, sp, 16
+; LMULMAX1-NEXT:    vse8.v v27, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
   %b = trunc <8 x i32> %a to <8 x i8>
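
Note (not part of the patch): below is a minimal standalone sketch of the offset arithmetic the new CONCAT_VECTORS lowering relies on. It is plain C++ with no LLVM dependency, and the helper name concatViaInsertSubvector is hypothetical; it only models how operand k of a fixed-length concat is inserted at element offset k * NumOpElts, mirroring DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL) in the hunk above.

  #include <cassert>
  #include <cstddef>
  #include <vector>

  // Model a fixed-length CONCAT_VECTORS as repeated INSERT_SUBVECTOR: start
  // from an "undef" result (zeros here, for determinism) and copy operand
  // Idx into the element window [Idx * NumOpElts, (Idx + 1) * NumOpElts).
  std::vector<int>
  concatViaInsertSubvector(const std::vector<std::vector<int>> &Ops) {
    assert(!Ops.empty() && "concat needs at least one operand");
    size_t NumOpElts = Ops[0].size();
    std::vector<int> Result(NumOpElts * Ops.size(), 0);
    for (size_t Idx = 0; Idx < Ops.size(); ++Idx) {
      assert(Ops[Idx].size() == NumOpElts && "operands must be equally wide");
      for (size_t Elt = 0; Elt < NumOpElts; ++Elt)
        Result[Idx * NumOpElts + Elt] = Ops[Idx][Elt];
    }
    return Result;
  }

  int main() {
    // Two v4 halves concatenate into one v8 value, like the <8 x i8> result
    // assembled from LMUL=1 pieces in the updated tests.
    std::vector<int> Lo{0, 1, 2, 3}, Hi{4, 5, 6, 7};
    std::vector<int> Cat = concatViaInsertSubvector({Lo, Hi});
    assert(Cat.size() == 8 && Cat[0] == 0 && Cat[4] == 4);
    return 0;
  }

Inserting each piece into a live destination is what lets the generated code above use tail-undisturbed (tu) vslideup.vi sequences in place of the removed vse8.v/vle8.v round trips through the stack.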