diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1387,16 +1387,60 @@
   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
 
   if (SVN->isSplat()) {
-    int Lane = SVN->getSplatIndex();
+    const int Lane = SVN->getSplatIndex();
     if (Lane >= 0) {
+      MVT XLenVT = Subtarget.getXLenVT();
       MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
           DAG, VT, Subtarget);
+      MVT SVT = VT.getVectorElementType();
+
+      SDValue Mask, VL;
+      std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+      // Turn a splatted vector load into a scalar load and splat, as long as
+      // the scalar load of the element type is legal. VectorCombine can
+      // introduce this pattern.
+      SDValue V = V1;
+      // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
+      // with undef.
+      // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
+      int Offset = Lane;
+      if (V.getOpcode() == ISD::CONCAT_VECTORS && V.hasOneUse()) {
+        int OpElements =
+            V.getOperand(0).getSimpleValueType().getVectorNumElements();
+        V = V.getOperand(Offset / OpElements);
+        Offset %= OpElements;
+      }
+
+      // We need to ensure the load isn't atomic or volatile. For integers the
+      // scalar can't be larger than XLen. We also avoid creating an unaligned
+      // scalar load.
+      if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple() &&
+          V.hasOneUse() && (SVT.isFloatingPoint() || SVT.bitsLE(XLenVT)) &&
+          cast<LoadSDNode>(V)->getAlign() >= SVT.getStoreSize()) {
+        auto *Ld = cast<LoadSDNode>(V);
+        Offset *= SVT.getStoreSize();
+        SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
+                                                   TypeSize::Fixed(Offset), DL);
+        if (SVT.isFloatingPoint())
+          V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
+                          Ld->getPointerInfo().getWithOffset(Offset),
+                          Ld->getOriginalAlign(),
+                          Ld->getMemOperand()->getFlags());
+        else
+          V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
+                             Ld->getPointerInfo().getWithOffset(Offset), SVT,
+                             Ld->getOriginalAlign(),
+                             Ld->getMemOperand()->getFlags());
+        unsigned Opc =
+            VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
+        SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
+        return convertFromScalableVector(VT, Splat, DAG, Subtarget);
+      }
 
       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
       assert(Lane < (int)NumElts && "Unexpected lane!");
 
-      SDValue Mask, VL;
-      std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
       SDValue Gather =
           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
                       DAG.getConstant(Lane, DL, XLenVT), Mask, VL);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -7,10 +7,10 @@
 define void @gather_const_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: gather_const_v8f16:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    flh ft0, 10(a0)
 ; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 5
-; CHECK-NEXT:    vse16.v v26, (a0)
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = extractelement <8 x half> %a, i32 5
@@ -23,10 +23,10 @@
 define void @gather_const_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: gather_const_v4f32:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    flw ft0, 8(a0)
 ; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; CHECK-NEXT:    vle32.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 2
-; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = extractelement <4 x float> %a, i32 2
@@ -39,10 +39,10 @@
 define void @gather_const_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: gather_const_v2f64:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    fld ft0, 0(a0)
 ; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
-; CHECK-NEXT:    vle64.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 0
-; CHECK-NEXT:    vse64.v v26, (a0)
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = extractelement <2 x double> %a, i32 0
@@ -55,34 +55,33 @@
 define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX8-LABEL: gather_const_v64f16:
 ; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    flh ft0, 94(a0)
 ; LMULMAX8-NEXT:    addi a1, zero, 64
 ; LMULMAX8-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
-; LMULMAX8-NEXT:    vle16.v v8, (a0)
-; LMULMAX8-NEXT:    addi a1, zero, 47
-; LMULMAX8-NEXT:    vrgather.vx v16, v8, a1
-; LMULMAX8-NEXT:    vse16.v v16, (a0)
+; LMULMAX8-NEXT:    vfmv.v.f v8, ft0
+; LMULMAX8-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v64f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 80
-; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; LMULMAX1-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
-; LMULMAX1-NEXT:    addi a4, a0, 32
+; LMULMAX1-NEXT:    addi t0, a0, 32
+; LMULMAX1-NEXT:    addi a4, a0, 80
 ; LMULMAX1-NEXT:    addi a5, a0, 64
-; LMULMAX1-NEXT:    addi a2, a0, 112
-; LMULMAX1-NEXT:    addi a3, a0, 96
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 7
-; LMULMAX1-NEXT:    vse16.v v26, (a3)
-; LMULMAX1-NEXT:    vse16.v v26, (a2)
-; LMULMAX1-NEXT:    vse16.v v26, (a5)
-; LMULMAX1-NEXT:    vse16.v v26, (a1)
-; LMULMAX1-NEXT:    vse16.v v26, (a4)
-; LMULMAX1-NEXT:    vse16.v v26, (a7)
-; LMULMAX1-NEXT:    vse16.v v26, (a0)
-; LMULMAX1-NEXT:    vse16.v v26, (a6)
+; LMULMAX1-NEXT:    flh ft0, 94(a0)
+; LMULMAX1-NEXT:    addi a1, a0, 112
+; LMULMAX1-NEXT:    addi a2, a0, 96
+; LMULMAX1-NEXT:    vsetivli a3, 8, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vfmv.v.f v25, ft0
+; LMULMAX1-NEXT:    vse16.v v25, (a2)
+; LMULMAX1-NEXT:    vse16.v v25, (a1)
+; LMULMAX1-NEXT:    vse16.v v25, (a5)
+; LMULMAX1-NEXT:    vse16.v v25, (a4)
+; LMULMAX1-NEXT:    vse16.v v25, (t0)
+; LMULMAX1-NEXT:    vse16.v v25, (a7)
+; LMULMAX1-NEXT:    vse16.v v25, (a0)
+; LMULMAX1-NEXT:    vse16.v v25, (a6)
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
   %b = extractelement <64 x half> %a, i32 47
@@ -95,33 +94,33 @@
 define void @gather_const_v32f32(<32 x float>* %x) {
 ; LMULMAX8-LABEL: gather_const_v32f32:
 ; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    flw ft0, 68(a0)
 ; LMULMAX8-NEXT:    addi a1, zero, 32
 ; LMULMAX8-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
-; LMULMAX8-NEXT:    vle32.v v8, (a0)
-; LMULMAX8-NEXT:    vrgather.vi v16, v8, 17
-; LMULMAX8-NEXT:    vse32.v v16, (a0)
+; LMULMAX8-NEXT:    vfmv.v.f v8, ft0
+; LMULMAX8-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v32f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 64
-; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; LMULMAX1-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
-; LMULMAX1-NEXT:    addi a4, a0, 32
-; LMULMAX1-NEXT:    addi a5, a0, 80
-; LMULMAX1-NEXT:    addi a2, a0, 112
-; LMULMAX1-NEXT:    addi a3, a0, 96
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse32.v v26, (a3)
-; LMULMAX1-NEXT:    vse32.v v26, (a2)
-; LMULMAX1-NEXT:    vse32.v v26, (a1)
-; LMULMAX1-NEXT:    vse32.v v26, (a5)
-; LMULMAX1-NEXT:    vse32.v v26, (a4)
-; LMULMAX1-NEXT:    vse32.v v26, (a7)
-; LMULMAX1-NEXT:    vse32.v v26, (a0)
-; LMULMAX1-NEXT:    vse32.v v26, (a6)
+; LMULMAX1-NEXT:    addi t0, a0, 32
+; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a5, a0, 64
+; LMULMAX1-NEXT:    flw ft0, 68(a0)
+; LMULMAX1-NEXT:    addi a1, a0, 112
+; LMULMAX1-NEXT:    addi a2, a0, 96
+; LMULMAX1-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vfmv.v.f v25, ft0
+; LMULMAX1-NEXT:    vse32.v v25, (a2)
+; LMULMAX1-NEXT:    vse32.v v25, (a1)
+; LMULMAX1-NEXT:    vse32.v v25, (a5)
+; LMULMAX1-NEXT:    vse32.v v25, (a4)
+; LMULMAX1-NEXT:    vse32.v v25, (t0)
+; LMULMAX1-NEXT:    vse32.v v25, (a7)
+; LMULMAX1-NEXT:    vse32.v v25, (a0)
+; LMULMAX1-NEXT:    vse32.v v25, (a6)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
   %b = extractelement <32 x float> %a, i32 17
@@ -134,32 +133,32 @@
 define void @gather_const_v16f64(<16 x double>* %x) {
 ; LMULMAX8-LABEL: gather_const_v16f64:
 ; LMULMAX8:       # %bb.0:
+; LMULMAX8-NEXT:    fld ft0, 80(a0)
 ; LMULMAX8-NEXT:    vsetivli a1, 16, e64,m8,ta,mu
-; LMULMAX8-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-NEXT:    vrgather.vi v16, v8, 10
-; LMULMAX8-NEXT:    vse64.v v16, (a0)
+; LMULMAX8-NEXT:    vfmv.v.f v8, ft0
+; LMULMAX8-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v16f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 80
-; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; LMULMAX1-NEXT:    vle64.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
-; LMULMAX1-NEXT:    addi a4, a0, 32
+; LMULMAX1-NEXT:    addi t0, a0, 32
+; LMULMAX1-NEXT:    addi a4, a0, 80
 ; LMULMAX1-NEXT:    addi a5, a0, 64
-; LMULMAX1-NEXT:    addi a2, a0, 112
-; LMULMAX1-NEXT:    addi a3, a0, 96
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 0
-; LMULMAX1-NEXT:    vse64.v v26, (a3)
-; LMULMAX1-NEXT:    vse64.v v26, (a2)
-; LMULMAX1-NEXT:    vse64.v v26, (a5)
-; LMULMAX1-NEXT:    vse64.v v26, (a1)
-; LMULMAX1-NEXT:    vse64.v v26, (a4)
-; LMULMAX1-NEXT:    vse64.v v26, (a7)
-; LMULMAX1-NEXT:    vse64.v v26, (a0)
-; LMULMAX1-NEXT:    vse64.v v26, (a6)
+; LMULMAX1-NEXT:    fld ft0, 80(a0)
+; LMULMAX1-NEXT:    addi a1, a0, 112
+; LMULMAX1-NEXT:    addi a2, a0, 96
+; LMULMAX1-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vfmv.v.f v25, ft0
+; LMULMAX1-NEXT:    vse64.v v25, (a2)
+; LMULMAX1-NEXT:    vse64.v v25, (a1)
+; LMULMAX1-NEXT:    vse64.v v25, (a5)
+; LMULMAX1-NEXT:    vse64.v v25, (a4)
+; LMULMAX1-NEXT:    vse64.v v25, (t0)
+; LMULMAX1-NEXT:    vse64.v v25, (a7)
+; LMULMAX1-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-NEXT:    vse64.v v25, (a6)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
   %b = extractelement <16 x double> %a, i32 10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -1,16 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4,LMULMAX4-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4,LMULMAX4-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1-RV64
 
 define void @gather_const_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: gather_const_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
-; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 12
-; CHECK-NEXT:    vse8.v v26, (a0)
+; CHECK-NEXT:    lb a1, 12(a0)
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = extractelement <16 x i8> %a, i32 12
@@ -23,10 +23,10 @@
 define void @gather_const_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: gather_const_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 5
-; CHECK-NEXT:    vse16.v v26, (a0)
+; CHECK-NEXT:    lh a1, 10(a0)
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
   %b = extractelement <8 x i16> %a, i32 5
@@ -39,10 +39,10 @@
 define void @gather_const_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: gather_const_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; CHECK-NEXT:    vle32.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 3
-; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    lw a1, 12(a0)
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
   %b = extractelement <4 x i32> %a, i32 3
@@ -53,13 +53,37 @@
 }
 
 define void @gather_const_v2i64(<2 x i64>* %x) {
-; CHECK-LABEL: gather_const_v2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
-; CHECK-NEXT:    vle64.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 1
-; CHECK-NEXT:    vse64.v v26, (a0)
-; CHECK-NEXT:    ret
+; LMULMAX4-RV32-LABEL: gather_const_v2i64:
+; LMULMAX4-RV32:       # %bb.0:
+; LMULMAX4-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX4-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX4-RV32-NEXT:    vrgather.vi v26, v25, 1
+; LMULMAX4-RV32-NEXT:    vse64.v v26, (a0)
+; LMULMAX4-RV32-NEXT:    ret
+;
+; LMULMAX4-RV64-LABEL: gather_const_v2i64:
+; LMULMAX4-RV64:       # %bb.0:
+; LMULMAX4-RV64-NEXT:    ld a1, 8(a0)
+; LMULMAX4-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; LMULMAX4-RV64-NEXT:    vmv.v.x v25, a1
+; LMULMAX4-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX4-RV64-NEXT:    ret
+;
+; LMULMAX1-RV32-LABEL: gather_const_v2i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
+; LMULMAX1-RV32-NEXT:    vrgather.vi v26, v25, 1
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a0)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: gather_const_v2i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    ld a1, 8(a0)
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vmv.v.x v25, a1
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
   %b = extractelement <2 x i64> %a, i32 1
   %c = insertelement <2 x i64> undef, i64 %b, i32 0
@@ -71,26 +95,25 @@
 define void @gather_const_v64i8(<64 x i8>* %x) {
 ; LMULMAX4-LABEL: gather_const_v64i8:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, zero, 64
-; LMULMAX4-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; LMULMAX4-NEXT:    vle8.v v28, (a0)
-; LMULMAX4-NEXT:    addi a1, zero, 32
-; LMULMAX4-NEXT:    vrgather.vx v8, v28, a1
-; LMULMAX4-NEXT:    vse8.v v8, (a0)
+; LMULMAX4-NEXT:    lb a1, 32(a0)
+; LMULMAX4-NEXT:    addi a2, zero, 64
+; LMULMAX4-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; LMULMAX4-NEXT:    vmv.v.x v28, a1
+; LMULMAX4-NEXT:    vse8.v v28, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v64i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 32
-; LMULMAX1-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vle8.v v25, (a1)
-; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    lb a2, 32(a0)
 ; LMULMAX1-NEXT:    addi a3, a0, 48
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 0
-; LMULMAX1-NEXT:    vse8.v v26, (a1)
-; LMULMAX1-NEXT:    vse8.v v26, (a3)
-; LMULMAX1-NEXT:    vse8.v v26, (a0)
-; LMULMAX1-NEXT:    vse8.v v26, (a2)
+; LMULMAX1-NEXT:    addi a4, a0, 32
+; LMULMAX1-NEXT:    vsetivli a5, 16, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.x v25, a2
+; LMULMAX1-NEXT:    vse8.v v25, (a4)
+; LMULMAX1-NEXT:    vse8.v v25, (a3)
+; LMULMAX1-NEXT:    vse8.v v25, (a0)
+; LMULMAX1-NEXT:    vse8.v v25, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
   %b = extractelement <64 x i8> %a, i32 32
@@ -103,25 +126,25 @@
 define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i16:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, zero, 32
-; LMULMAX4-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; LMULMAX4-NEXT:    vle16.v v28, (a0)
-; LMULMAX4-NEXT:    vrgather.vi v8, v28, 25
-; LMULMAX4-NEXT:    vse16.v v8, (a0)
+; LMULMAX4-NEXT:    lh a1, 50(a0)
+; LMULMAX4-NEXT:    addi a2, zero, 32
+; LMULMAX4-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; LMULMAX4-NEXT:    vmv.v.x v28, a1
+; LMULMAX4-NEXT:    vse16.v v28, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 48
-; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; LMULMAX1-NEXT:    vle16.v v25, (a1)
-; LMULMAX1-NEXT:    addi a2, a0, 16
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse16.v v26, (a3)
-; LMULMAX1-NEXT:    vse16.v v26, (a1)
-; LMULMAX1-NEXT:    vse16.v v26, (a0)
-; LMULMAX1-NEXT:    vse16.v v26, (a2)
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    lh a2, 50(a0)
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    addi a4, a0, 32
+; LMULMAX1-NEXT:    vsetivli a5, 8, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.x v25, a2
+; LMULMAX1-NEXT:    vse16.v v25, (a4)
+; LMULMAX1-NEXT:    vse16.v v25, (a3)
+; LMULMAX1-NEXT:    vse16.v v25, (a0)
+; LMULMAX1-NEXT:    vse16.v v25, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
   %b = extractelement <32 x i16> %a, i32 25
@@ -134,24 +157,24 @@
 define void @gather_const_v16i32(<16 x i32>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli a1, 16, e32,m4,ta,mu
-; LMULMAX4-NEXT:    vle32.v v28, (a0)
-; LMULMAX4-NEXT:    vrgather.vi v8, v28, 9
-; LMULMAX4-NEXT:    vse32.v v8, (a0)
+; LMULMAX4-NEXT:    lw a1, 36(a0)
+; LMULMAX4-NEXT:    vsetivli a2, 16, e32,m4,ta,mu
+; LMULMAX4-NEXT:    vmv.v.x v28, a1
+; LMULMAX4-NEXT:    vse32.v v28, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v16i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 32
-; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; LMULMAX1-NEXT:    vle32.v v25, (a1)
-; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    lw a2, 36(a0)
 ; LMULMAX1-NEXT:    addi a3, a0, 48
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse32.v v26, (a1)
-; LMULMAX1-NEXT:    vse32.v v26, (a3)
-; LMULMAX1-NEXT:    vse32.v v26, (a0)
-; LMULMAX1-NEXT:    vse32.v v26, (a2)
+; LMULMAX1-NEXT:    addi a4, a0, 32
+; LMULMAX1-NEXT:    vsetivli a5, 4, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vmv.v.x v25, a2
+; LMULMAX1-NEXT:    vse32.v v25, (a4)
+; LMULMAX1-NEXT:    vse32.v v25, (a3)
+; LMULMAX1-NEXT:    vse32.v v25, (a0)
+; LMULMAX1-NEXT:    vse32.v v25, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
   %b = extractelement <16 x i32> %a, i32 9
@@ -162,27 +185,49 @@
 }
 
 define void @gather_const_v8i64(<8 x i64>* %x) {
-; LMULMAX4-LABEL: gather_const_v8i64:
-; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
-; LMULMAX4-NEXT:    vle64.v v28, (a0)
-; LMULMAX4-NEXT:    vrgather.vi v8, v28, 3
-; LMULMAX4-NEXT:    vse64.v v8, (a0)
-; LMULMAX4-NEXT:    ret
+; LMULMAX4-RV32-LABEL: gather_const_v8i64:
+; LMULMAX4-RV32:       # %bb.0:
+; LMULMAX4-RV32-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
+; LMULMAX4-RV32-NEXT:    vle64.v v28, (a0)
+; LMULMAX4-RV32-NEXT:    vrgather.vi v8, v28, 3
+; LMULMAX4-RV32-NEXT:    vse64.v v8, (a0)
+; LMULMAX4-RV32-NEXT:    ret
 ;
-; LMULMAX1-LABEL: gather_const_v8i64:
-; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; LMULMAX1-NEXT:    vle64.v v25, (a1)
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse64.v v26, (a3)
-; LMULMAX1-NEXT:    vse64.v v26, (a2)
-; LMULMAX1-NEXT:    vse64.v v26, (a0)
-; LMULMAX1-NEXT:    vse64.v v26, (a1)
-; LMULMAX1-NEXT:    ret
+; LMULMAX4-RV64-LABEL: gather_const_v8i64:
+; LMULMAX4-RV64:       # %bb.0:
+; LMULMAX4-RV64-NEXT:    ld a1, 24(a0)
+; LMULMAX4-RV64-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
+; LMULMAX4-RV64-NEXT:    vmv.v.x v28, a1
+; LMULMAX4-RV64-NEXT:    vse64.v v28, (a0)
+; LMULMAX4-RV64-NEXT:    ret
+;
+; LMULMAX1-RV32-LABEL: gather_const_v8i64:
+; LMULMAX1-RV32:       # %bb.0:
+; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle64.v v25, (a1)
+; LMULMAX1-RV32-NEXT:    addi a2, a0, 48
+; LMULMAX1-RV32-NEXT:    addi a3, a0, 32
+; LMULMAX1-RV32-NEXT:    vrgather.vi v26, v25, 1
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a3)
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a2)
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a0)
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    ret
+;
+; LMULMAX1-RV64-LABEL: gather_const_v8i64:
+; LMULMAX1-RV64:       # %bb.0:
+; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
+; LMULMAX1-RV64-NEXT:    ld a2, 24(a0)
+; LMULMAX1-RV64-NEXT:    addi a3, a0, 48
+; LMULMAX1-RV64-NEXT:    addi a4, a0, 32
+; LMULMAX1-RV64-NEXT:    vsetivli a5, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vmv.v.x v25, a2
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a4)
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a3)
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT:    vse64.v v25, (a1)
+; LMULMAX1-RV64-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %b = extractelement <8 x i64> %a, i32 3
   %c = insertelement <8 x i64> undef, i64 %b, i32 0
@@ -190,3 +235,35 @@
   store <8 x i64> %d, <8 x i64>* %x
   ret void
 }
+
+define void @splat_concat_low(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
+; CHECK-LABEL: splat_concat_low:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lh a0, 0(a0)
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vse16.v v25, (a2)
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, <4 x i16>* %x
+  %b = load <4 x i16>, <4 x i16>* %y
+  %c = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> 
+  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> zeroinitializer
+  store <8 x i16> %d, <8 x i16>* %z
+  ret void
+}
+
+define void @splat_concat_high(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
+; CHECK-LABEL: splat_concat_high:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lh a0, 2(a1)
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vse16.v v25, (a2)
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, <4 x i16>* %x
+  %b = load <4 x i16>, <4 x i16>* %y
+  %c = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> 
+  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> 
+  store <8 x i16> %d, <8 x i16>* %z
+  ret void
+}