diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -903,6 +903,7 @@
     setTargetDAGCombine(ISD::SRA);
     setTargetDAGCombine(ISD::SRL);
     setTargetDAGCombine(ISD::SHL);
+    setTargetDAGCombine(ISD::STORE);
   }
 }
 
@@ -6580,6 +6581,28 @@
     unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
     return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
   }
+  case ISD::STORE: {
+    auto *Store = cast<StoreSDNode>(N);
+    SDValue Val = Store->getValue();
+    // Combine store of vmv.x.s to vse with VL of 1.
+    // FIXME: Support FP.
+    if (Val.getOpcode() == RISCVISD::VMV_X_S) {
+      SDValue Src = Val.getOperand(0);
+      EVT VecVT = Src.getValueType();
+      EVT MemVT = Store->getMemoryVT();
+      // The memory VT and the element type must match.
+      if (VecVT.getVectorElementType() == MemVT) {
+        SDLoc DL(N);
+        return DAG.getMemIntrinsicNode(
+            RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
+            {Store->getChain(), Src, Store->getBasePtr(),
+             DAG.getConstant(1, DL, Subtarget.getXLenVT())},
+            Store->getMemoryVT(), Store->getMemOperand());
+      }
+    }
+
+    break;
+  }
   }
 
   return SDValue();
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -521,3 +521,79 @@
   %c = extractelement <3 x i64> %b, i32 %idx
   ret i64 %c
 }
+
+define void @store_extractelt_v16i8(<16 x i8>* %x, i8* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vse8.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = extractelement <16 x i8> %a, i32 7
+  store i8 %b, i8* %p
+  ret void
+}
+
+define void @store_extractelt_v8i16(<8 x i16>* %x, i16* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vse16.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <8 x i16>, <8 x i16>* %x
+  %b = extractelement <8 x i16> %a, i32 7
+  store i16 %b, i16* %p
+  ret void
+}
+
+define void @store_extractelt_v4i32(<4 x i32>* %x, i32* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 2
+; CHECK-NEXT:    vse32.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <4 x i32>, <4 x i32>* %x
+  %b = extractelement <4 x i32> %a, i32 2
+  store i32 %b, i32* %p
+  ret void
+}
+
+; FIXME: Use vse64.v on RV32 to avoid two scalar extracts and two scalar stores.
+define void @store_extractelt_v2i64(<2 x i64>* %x, i64* %p) nounwind {
+; RV32-LABEL: store_extractelt_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vle64.v v25, (a0)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    vsrl.vx v26, v25, a0
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    vmv.x.s a2, v25
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    sw a0, 4(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: store_extractelt_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vle64.v v25, (a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vse64.v v25, (a1)
+; RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = extractelement <2 x i64> %a, i64 1
+  store i64 %b, i64* %p
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -727,10 +727,8 @@
 ; RV32MV-NEXT:    vmsne.vv v0, v26, v30
 ; RV32MV-NEXT:    vmv.v.i v26, 0
 ; RV32MV-NEXT:    vmerge.vim v26, v26, -1, v0
-; RV32MV-NEXT:    vsetivli zero, 0, e32, m2, ta, mu
-; RV32MV-NEXT:    vmv.x.s a0, v26
-; RV32MV-NEXT:    sw a0, 0(s1)
 ; RV32MV-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
+; RV32MV-NEXT:    vse32.v v26, (s1)
 ; RV32MV-NEXT:    vslidedown.vi v28, v26, 1
 ; RV32MV-NEXT:    vmv.x.s a0, v28
 ; RV32MV-NEXT:    vslidedown.vi v28, v26, 2