diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1352,7 +1352,8 @@
   SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                      SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
                      Align Alignment, MachineMemOperand::Flags MMOFlags,
-                     const AAMDNodes &AAInfo, bool IsCompressing = false);
+                     const AAMDNodes &AAInfo = AAMDNodes(),
+                     bool IsCompressing = false);
   SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                      SDValue Mask, SDValue EVL, MachineMemOperand *MMO,
                      bool IsCompressing = false);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -930,6 +930,7 @@
     setTargetDAGCombine(ISD::SRA);
     setTargetDAGCombine(ISD::SRL);
     setTargetDAGCombine(ISD::SHL);
+    setTargetDAGCombine(ISD::STORE);
   }
 }
 
@@ -7116,6 +7117,30 @@
       return V;
     return SDValue();
   }
+  case ISD::STORE: {
+    auto *Store = cast<StoreSDNode>(N);
+    SDValue Val = Store->getValue();
+    // Combine store of vmv.x.s to vse with VL of 1.
+    // FIXME: Support FP.
+    if (Val.getOpcode() == RISCVISD::VMV_X_S) {
+      SDValue Src = Val.getOperand(0);
+      EVT VecVT = Src.getValueType();
+      EVT MemVT = Store->getMemoryVT();
+      // The memory VT and the element type must match.
+      if (VecVT.getVectorElementType() == MemVT) {
+        SDLoc DL(N);
+        MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
+        return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
+                              DAG.getConstant(1, DL, MaskVT),
+                              DAG.getConstant(1, DL, Subtarget.getXLenVT()),
+                              Store->getPointerInfo(),
+                              Store->getOriginalAlign(),
+                              Store->getMemOperand()->getFlags());
+      }
+    }
+
+    break;
+  }
   }
 
   return SDValue();
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -521,3 +521,79 @@
   %c = extractelement <3 x i64> %b, i32 %idx
   ret i64 %c
 }
+
+define void @store_extractelt_v16i8(<16 x i8>* %x, i8* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vse8.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = extractelement <16 x i8> %a, i32 7
+  store i8 %b, i8* %p
+  ret void
+}
+
+define void @store_extractelt_v8i16(<8 x i16>* %x, i16* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vse16.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <8 x i16>, <8 x i16>* %x
+  %b = extractelement <8 x i16> %a, i32 7
+  store i16 %b, i16* %p
+  ret void
+}
+
+define void @store_extractelt_v4i32(<4 x i32>* %x, i32* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 2
+; CHECK-NEXT:    vse32.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <4 x i32>, <4 x i32>* %x
+  %b = extractelement <4 x i32> %a, i32 2
+  store i32 %b, i32* %p
+  ret void
+}
+
+; FIXME: Use vse64.v on RV32 to avoid two scalar extracts and two scalar stores.
+define void @store_extractelt_v2i64(<2 x i64>* %x, i64* %p) nounwind {
+; RV32-LABEL: store_extractelt_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vle64.v v25, (a0)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    vsrl.vx v26, v25, a0
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    vmv.x.s a2, v25
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    sw a0, 4(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: store_extractelt_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vle64.v v25, (a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vse64.v v25, (a1)
+; RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = extractelement <2 x i64> %a, i64 1
+  store i64 %b, i64* %p
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -727,10 +727,8 @@
 ; RV32MV-NEXT:    vmsne.vv v0, v26, v30
 ; RV32MV-NEXT:    vmv.v.i v26, 0
 ; RV32MV-NEXT:    vmerge.vim v26, v26, -1, v0
-; RV32MV-NEXT:    vsetivli zero, 0, e32, m2, ta, mu
-; RV32MV-NEXT:    vmv.x.s a0, v26
-; RV32MV-NEXT:    sw a0, 0(s1)
 ; RV32MV-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
+; RV32MV-NEXT:    vse32.v v26, (s1)
 ; RV32MV-NEXT:    vslidedown.vi v28, v26, 1
 ; RV32MV-NEXT:    vmv.x.s a0, v28
 ; RV32MV-NEXT:    vslidedown.vi v28, v26, 2