diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11041,6 +11041,20 @@
                             Index, ScaleOp},
         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
   }
+  case RISCVISD::VRGATHER_VV_VL: {
+    // (vrgather src, (vid mask, vl), passthru, mask, vl)
+    // ->
+    // (vmerge src, passthru, mask, vl)
+    SDValue Idx = N->getOperand(1);
+    SDValue Mask = N->getOperand(3);
+    SDValue VL = N->getOperand(4);
+    if (Idx.getOpcode() != RISCVISD::VID_VL)
+      break;
+    if (Idx.getOperand(0) != Mask || Idx.getOperand(1) != VL)
+      break;
+    return DAG.getNode(RISCVISD::VP_MERGE_VL, SDLoc(N), N->getValueType(0),
+                       Mask, N->getOperand(0), N->getOperand(2), VL);
+  }
   case RISCVISD::SRA_VL:
   case RISCVISD::SRL_VL:
   case RISCVISD::SHL_VL: {
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-gather.ll b/llvm/test/CodeGen/RISCV/rvv/combine-gather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/combine-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-gather.ll
@@ -2,19 +2,18 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
 
-; FIXME: There should only be one vrgather here
+; FIXME: The vadd.vi should be folded away here
 define <8 x i8> @gather_i8(<8 x i8> %v, <8 x i8> %w) {
 ; CHECK-LABEL: gather_i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vid.v v11
-; CHECK-NEXT:    vrgather.vv v10, v8, v11
+; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    li a0, 240
 ; CHECK-NEXT:    vmv.s.x v0, a0
-; CHECK-NEXT:    vadd.vi v8, v11, -4
-; CHECK-NEXT:    vrgather.vv v10, v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vadd.vi v10, v10, -4
+; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %res = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
   ret <8 x i8> %res
 }