diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -475,6 +475,10 @@
                                       SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorMaskedLoadToRVV(SDValue Op,
+                                                SelectionDAG &DAG) const;
+  SDValue lowerFixedLengthVectorMaskedStoreToRVV(SDValue Op,
+                                                 SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
                                              unsigned MaskOpc,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -566,6 +566,8 @@
       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+      setOperationAction(ISD::MLOAD, VT, Custom);
+      setOperationAction(ISD::MSTORE, VT, Custom);
       setOperationAction(ISD::ADD, VT, Custom);
       setOperationAction(ISD::MUL, VT, Custom);
       setOperationAction(ISD::SUB, VT, Custom);
@@ -637,6 +639,8 @@
       setOperationAction(ISD::LOAD, VT, Custom);
       setOperationAction(ISD::STORE, VT, Custom);
+      setOperationAction(ISD::MLOAD, VT, Custom);
+      setOperationAction(ISD::MSTORE, VT, Custom);
       setOperationAction(ISD::FADD, VT, Custom);
       setOperationAction(ISD::FSUB, VT, Custom);
       setOperationAction(ISD::FMUL, VT, Custom);
@@ -1583,6 +1587,10 @@
     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
   case ISD::STORE:
     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
+  case ISD::MLOAD:
+    return lowerFixedLengthVectorMaskedLoadToRVV(Op, DAG);
+  case ISD::MSTORE:
+    return lowerFixedLengthVectorMaskedStoreToRVV(Op, DAG);
   case ISD::SETCC:
     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
   case ISD::ADD:
@@ -3093,6 +3101,58 @@
                            Store->getMemoryVT(), Store->getMemOperand());
 }
+SDValue RISCVTargetLowering::lowerFixedLengthVectorMaskedLoadToRVV(
+    SDValue Op, SelectionDAG &DAG) const {
+  auto *Load = cast<MaskedLoadSDNode>(Op);
+
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+  MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  SDValue Mask =
+      convertToScalableVector(MaskVT, Load->getMask(), DAG, Subtarget);
+  SDValue PassThru =
+      convertToScalableVector(ContainerVT, Load->getPassThru(), DAG, Subtarget);
+  SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
+
+  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+  SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
+  SDValue Ops[] = {Load->getChain(), IntID, PassThru,
+                   Load->getBasePtr(), Mask, VL};
+  SDValue NewLoad =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                              Load->getMemoryVT(), Load->getMemOperand());
+
+  SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
+  return DAG.getMergeValues({Result, Load->getChain()}, DL);
+}
+
+SDValue RISCVTargetLowering::lowerFixedLengthVectorMaskedStoreToRVV(
+    SDValue Op, SelectionDAG &DAG) const {
+  auto *Store = cast<MaskedStoreSDNode>(Op);
+
+  SDLoc DL(Op);
+  SDValue Val = Store->getValue();
+  MVT VT = Val.getSimpleValueType();
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+  MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
+  SDValue Mask =
+      convertToScalableVector(MaskVT, Store->getMask(), DAG, Subtarget);
+
+  SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
+
+  SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
+  return DAG.getMemIntrinsicNode(
+      ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
+      {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
+      Store->getMemoryVT(), Store->getMemOperand());
+}
+
 SDValue RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
                                                               SelectionDAG &DAG) const {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -60,6 +60,39 @@
     }
     return ST->getXLen();
   }
+
+  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
+    if (!ST->hasStdExtV())
+      return false;
+
+    // Only support fixed vectors if we know the minimum vector size.
+    if (isa<FixedVectorType>(DataType) && ST->getMinRVVVectorSizeInBits() == 0)
+      return false;
+
+    Type *ScalarTy = DataType->getScalarType();
+    if (ScalarTy->isPointerTy())
+      return true;
+
+    if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
+        ScalarTy->isIntegerTy(32) || ScalarTy->isIntegerTy(64))
+      return true;
+
+    if (ScalarTy->isHalfTy())
+      return ST->hasStdExtZfh();
+    if (ScalarTy->isFloatTy())
+      return ST->hasStdExtF();
+    if (ScalarTy->isDoubleTy())
+      return ST->hasStdExtD();
+
+    return false;
+  }
+
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
+    return isLegalMaskedLoadStore(DataType, Alignment);
+  }
+  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
+    return isLegalMaskedLoadStore(DataType, Alignment);
+  }
 };

 } // end namespace llvm
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
@@ -0,0 +1,494 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-v,+f,+d,+experimental-zfh -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v,+f,+d,+experimental-zfh -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define void @masked_load_v1f16(<1 x half>* %a, <1 x half>* %m_ptr, <1 x half>* %res_ptr) nounwind {
+; CHECK-LABEL: masked_load_v1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a3, 1, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v25, (a1)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    vmfeq.vf v0, v25, ft0
+; CHECK-NEXT:    vle16.v v25, (a0), v0.t
+; CHECK-NEXT:    vse16.v v25, (a2)
+; CHECK-NEXT:    ret
+  %m = load <1 x half>, <1 x half>* %m_ptr
+  %mask = fcmp oeq <1 x half> %m, zeroinitializer
+  %load = call <1 x half> @llvm.masked.load.v1f16(<1 x half>* %a, i32 8, <1 x i1> %mask, <1 x half> undef)
+  store <1 x half> %load, <1 x half>* %res_ptr
+  ret void
+}
+declare <1 x half> @llvm.masked.load.v1f16(<1 x half>*, i32, <1 x i1>, <1 x half>)
+
+define void @masked_load_v1f32(<1 x float>* %a, <1 x float>* %m_ptr, <1 x float>* %res_ptr) nounwind {
+; CHECK-LABEL: masked_load_v1f32:
+; CHECK:       # %bb.0:
+;
CHECK-NEXT: vsetivli a3, 1, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a1) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vle32.v v25, (a0), v0.t +; CHECK-NEXT: vse32.v v25, (a2) +; CHECK-NEXT: ret + %m = load <1 x float>, <1 x float>* %m_ptr + %mask = fcmp oeq <1 x float> %m, zeroinitializer + %load = call <1 x float> @llvm.masked.load.v1f32(<1 x float>* %a, i32 8, <1 x i1> %mask, <1 x float> undef) + store <1 x float> %load, <1 x float>* %res_ptr + ret void +} +declare <1 x float> @llvm.masked.load.v1f32(<1 x float>*, i32, <1 x i1>, <1 x float>) + +define void @masked_load_v1f64(<1 x double>* %a, <1 x double>* %m_ptr, <1 x double>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a1) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v25, ft0 +; RV32-NEXT: vle64.v v25, (a0), v0.t +; RV32-NEXT: vse64.v v25, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a1) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v25, ft0 +; RV64-NEXT: vle64.v v25, (a0), v0.t +; RV64-NEXT: vse64.v v25, (a2) +; RV64-NEXT: ret + %m = load <1 x double>, <1 x double>* %m_ptr + %mask = fcmp oeq <1 x double> %m, zeroinitializer + %load = call <1 x double> @llvm.masked.load.v1f64(<1 x double>* %a, i32 8, <1 x i1> %mask, <1 x double> undef) + store <1 x double> %load, <1 x double>* %res_ptr + ret void +} +declare <1 x double> @llvm.masked.load.v1f64(<1 x double>*, i32, <1 x i1>, <1 x double>) + +define void @masked_load_v2f16(<2 x half>* %a, <2 x half>* %m_ptr, <2 x half>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a1) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vle16.v v25, (a0), v0.t +; CHECK-NEXT: vse16.v v25, (a2) +; CHECK-NEXT: ret + %m = load <2 x half>, <2 x half>* %m_ptr + %mask = fcmp oeq <2 x half> %m, zeroinitializer + %load = call <2 x half> @llvm.masked.load.v2f16(<2 x half>* %a, i32 8, <2 x i1> %mask, <2 x half> undef) + store <2 x half> %load, <2 x half>* %res_ptr + ret void +} +declare <2 x half> @llvm.masked.load.v2f16(<2 x half>*, i32, <2 x i1>, <2 x half>) + +define void @masked_load_v2f32(<2 x float>* %a, <2 x float>* %m_ptr, <2 x float>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a1) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vle32.v v25, (a0), v0.t +; CHECK-NEXT: vse32.v v25, (a2) +; CHECK-NEXT: ret + %m = load <2 x float>, <2 x float>* %m_ptr + %mask = fcmp oeq <2 x float> %m, zeroinitializer + %load = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %a, i32 8, <2 x i1> %mask, <2 x float> undef) + store <2 x float> %load, <2 x float>* %res_ptr + ret void +} +declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>) + +define void @masked_load_v2f64(<2 x double>* %a, <2 x double>* %m_ptr, <2 x double>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a1) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v25, ft0 +; RV32-NEXT: vle64.v v25, (a0), v0.t +; RV32-NEXT: vse64.v v25, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: 
masked_load_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a1) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v25, ft0 +; RV64-NEXT: vle64.v v25, (a0), v0.t +; RV64-NEXT: vse64.v v25, (a2) +; RV64-NEXT: ret + %m = load <2 x double>, <2 x double>* %m_ptr + %mask = fcmp oeq <2 x double> %m, zeroinitializer + %load = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %a, i32 8, <2 x i1> %mask, <2 x double> undef) + store <2 x double> %load, <2 x double>* %res_ptr + ret void +} +declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>) + +define void @masked_load_v4f16(<4 x half>* %a, <4 x half>* %m_ptr, <4 x half>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a1) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vle16.v v25, (a0), v0.t +; CHECK-NEXT: vse16.v v25, (a2) +; CHECK-NEXT: ret + %m = load <4 x half>, <4 x half>* %m_ptr + %mask = fcmp oeq <4 x half> %m, zeroinitializer + %load = call <4 x half> @llvm.masked.load.v4f16(<4 x half>* %a, i32 8, <4 x i1> %mask, <4 x half> undef) + store <4 x half> %load, <4 x half>* %res_ptr + ret void +} +declare <4 x half> @llvm.masked.load.v4f16(<4 x half>*, i32, <4 x i1>, <4 x half>) + +define void @masked_load_v4f32(<4 x float>* %a, <4 x float>* %m_ptr, <4 x float>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a1) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vle32.v v25, (a0), v0.t +; CHECK-NEXT: vse32.v v25, (a2) +; CHECK-NEXT: ret + %m = load <4 x float>, <4 x float>* %m_ptr + %mask = fcmp oeq <4 x float> %m, zeroinitializer + %load = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %a, i32 8, <4 x i1> %mask, <4 x float> undef) + store <4 x float> %load, <4 x float>* %res_ptr + ret void +} +declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>) + +define void @masked_load_v4f64(<4 x double>* %a, <4 x double>* %m_ptr, <4 x double>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV32-NEXT: vle64.v v26, (a1) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v26, ft0 +; RV32-NEXT: vle64.v v26, (a0), v0.t +; RV32-NEXT: vse64.v v26, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV64-NEXT: vle64.v v26, (a1) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v26, ft0 +; RV64-NEXT: vle64.v v26, (a0), v0.t +; RV64-NEXT: vse64.v v26, (a2) +; RV64-NEXT: ret + %m = load <4 x double>, <4 x double>* %m_ptr + %mask = fcmp oeq <4 x double> %m, zeroinitializer + %load = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %a, i32 8, <4 x i1> %mask, <4 x double> undef) + store <4 x double> %load, <4 x double>* %res_ptr + ret void +} +declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>) + +define void @masked_load_v8f16(<8 x half>* %a, <8 x half>* %m_ptr, <8 x half>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a1) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vle16.v v25, (a0), v0.t +; CHECK-NEXT: vse16.v v25, (a2) +; 
CHECK-NEXT: ret + %m = load <8 x half>, <8 x half>* %m_ptr + %mask = fcmp oeq <8 x half> %m, zeroinitializer + %load = call <8 x half> @llvm.masked.load.v8f16(<8 x half>* %a, i32 8, <8 x i1> %mask, <8 x half> undef) + store <8 x half> %load, <8 x half>* %res_ptr + ret void +} +declare <8 x half> @llvm.masked.load.v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>) + +define void @masked_load_v8f32(<8 x float>* %a, <8 x float>* %m_ptr, <8 x float>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu +; CHECK-NEXT: vle32.v v26, (a1) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v26, ft0 +; CHECK-NEXT: vle32.v v26, (a0), v0.t +; CHECK-NEXT: vse32.v v26, (a2) +; CHECK-NEXT: ret + %m = load <8 x float>, <8 x float>* %m_ptr + %mask = fcmp oeq <8 x float> %m, zeroinitializer + %load = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %a, i32 8, <8 x i1> %mask, <8 x float> undef) + store <8 x float> %load, <8 x float>* %res_ptr + ret void +} +declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>) + +define void @masked_load_v8f64(<8 x double>* %a, <8 x double>* %m_ptr, <8 x double>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV32-NEXT: vle64.v v28, (a1) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v28, ft0 +; RV32-NEXT: vle64.v v28, (a0), v0.t +; RV32-NEXT: vse64.v v28, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV64-NEXT: vle64.v v28, (a1) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v28, ft0 +; RV64-NEXT: vle64.v v28, (a0), v0.t +; RV64-NEXT: vse64.v v28, (a2) +; RV64-NEXT: ret + %m = load <8 x double>, <8 x double>* %m_ptr + %mask = fcmp oeq <8 x double> %m, zeroinitializer + %load = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %a, i32 8, <8 x i1> %mask, <8 x double> undef) + store <8 x double> %load, <8 x double>* %res_ptr + ret void +} +declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>) + +define void @masked_load_v16f16(<16 x half>* %a, <16 x half>* %m_ptr, <16 x half>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v26, ft0 +; CHECK-NEXT: vle16.v v26, (a0), v0.t +; CHECK-NEXT: vse16.v v26, (a2) +; CHECK-NEXT: ret + %m = load <16 x half>, <16 x half>* %m_ptr + %mask = fcmp oeq <16 x half> %m, zeroinitializer + %load = call <16 x half> @llvm.masked.load.v16f16(<16 x half>* %a, i32 8, <16 x i1> %mask, <16 x half> undef) + store <16 x half> %load, <16 x half>* %res_ptr + ret void +} +declare <16 x half> @llvm.masked.load.v16f16(<16 x half>*, i32, <16 x i1>, <16 x half>) + +define void @masked_load_v16f32(<16 x float>* %a, <16 x float>* %m_ptr, <16 x float>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v28, ft0 +; CHECK-NEXT: vle32.v v28, (a0), v0.t +; CHECK-NEXT: vse32.v v28, (a2) +; CHECK-NEXT: ret + %m = load <16 x float>, <16 x float>* %m_ptr + %mask = fcmp oeq <16 x float> %m, zeroinitializer + %load = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %a, i32 8, <16 x i1> %mask, <16 x float> undef) + store <16 x 
float> %load, <16 x float>* %res_ptr + ret void +} +declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>) + +define void @masked_load_v16f64(<16 x double>* %a, <16 x double>* %m_ptr, <16 x double>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v16f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v8, (a1) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v8, ft0 +; RV32-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV32-NEXT: vle64.v v8, (a0), v0.t +; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV32-NEXT: vse64.v v8, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v16f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV64-NEXT: vle64.v v8, (a1) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v8, ft0 +; RV64-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV64-NEXT: vle64.v v8, (a0), v0.t +; RV64-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV64-NEXT: vse64.v v8, (a2) +; RV64-NEXT: ret + %m = load <16 x double>, <16 x double>* %m_ptr + %mask = fcmp oeq <16 x double> %m, zeroinitializer + %load = call <16 x double> @llvm.masked.load.v16f64(<16 x double>* %a, i32 8, <16 x i1> %mask, <16 x double> undef) + store <16 x double> %load, <16 x double>* %res_ptr + ret void +} +declare <16 x double> @llvm.masked.load.v16f64(<16 x double>*, i32, <16 x i1>, <16 x double>) + +define void @masked_load_v32f16(<32 x half>* %a, <32 x half>* %m_ptr, <32 x half>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v28, ft0 +; CHECK-NEXT: vle16.v v28, (a0), v0.t +; CHECK-NEXT: vse16.v v28, (a2) +; CHECK-NEXT: ret + %m = load <32 x half>, <32 x half>* %m_ptr + %mask = fcmp oeq <32 x half> %m, zeroinitializer + %load = call <32 x half> @llvm.masked.load.v32f16(<32 x half>* %a, i32 8, <32 x i1> %mask, <32 x half> undef) + store <32 x half> %load, <32 x half>* %res_ptr + ret void +} +declare <32 x half> @llvm.masked.load.v32f16(<32 x half>*, i32, <32 x i1>, <32 x half>) + +define void @masked_load_v32f32(<32 x float>* %a, <32 x float>* %m_ptr, <32 x float>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v32f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a4, a3, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v8, ft0 +; CHECK-NEXT: vsetvli a1, a3, e32,m8,tu,mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a3, e32,m8,ta,mu +; CHECK-NEXT: vse32.v v8, (a2) +; CHECK-NEXT: ret + %m = load <32 x float>, <32 x float>* %m_ptr + %mask = fcmp oeq <32 x float> %m, zeroinitializer + %load = call <32 x float> @llvm.masked.load.v32f32(<32 x float>* %a, i32 8, <32 x i1> %mask, <32 x float> undef) + store <32 x float> %load, <32 x float>* %res_ptr + ret void +} +declare <32 x float> @llvm.masked.load.v32f32(<32 x float>*, i32, <32 x i1>, <32 x float>) + +define void @masked_load_v32f64(<32 x double>* %a, <32 x double>* %m_ptr, <32 x double>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v32f64: +; RV32: # %bb.0: +; RV32-NEXT: addi a3, a1, 128 +; RV32-NEXT: vsetivli a4, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v8, (a1) +; RV32-NEXT: vle64.v v16, (a3) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v8, ft0 +; RV32-NEXT: vmfeq.vf v26, v16, ft0 +; RV32-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV32-NEXT: vle64.v v8, (a0), v0.t 
+; RV32-NEXT: addi a0, a0, 128 +; RV32-NEXT: vmv1r.v v0, v26 +; RV32-NEXT: vle64.v v16, (a0), v0.t +; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV32-NEXT: vse64.v v8, (a2) +; RV32-NEXT: addi a0, a2, 128 +; RV32-NEXT: vse64.v v16, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v32f64: +; RV64: # %bb.0: +; RV64-NEXT: addi a3, a1, 128 +; RV64-NEXT: vsetivli a4, 16, e64,m8,ta,mu +; RV64-NEXT: vle64.v v8, (a1) +; RV64-NEXT: vle64.v v16, (a3) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v8, ft0 +; RV64-NEXT: vmfeq.vf v26, v16, ft0 +; RV64-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV64-NEXT: vle64.v v8, (a0), v0.t +; RV64-NEXT: addi a0, a0, 128 +; RV64-NEXT: vmv1r.v v0, v26 +; RV64-NEXT: vle64.v v16, (a0), v0.t +; RV64-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV64-NEXT: vse64.v v8, (a2) +; RV64-NEXT: addi a0, a2, 128 +; RV64-NEXT: vse64.v v16, (a0) +; RV64-NEXT: ret + %m = load <32 x double>, <32 x double>* %m_ptr + %mask = fcmp oeq <32 x double> %m, zeroinitializer + %load = call <32 x double> @llvm.masked.load.v32f64(<32 x double>* %a, i32 8, <32 x i1> %mask, <32 x double> undef) + store <32 x double> %load, <32 x double>* %res_ptr + ret void +} +declare <32 x double> @llvm.masked.load.v32f64(<32 x double>*, i32, <32 x i1>, <32 x double>) + +define void @masked_load_v64f16(<64 x half>* %a, <64 x half>* %m_ptr, <64 x half>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v64f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: vsetvli a4, a3, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v8, ft0 +; CHECK-NEXT: vsetvli a1, a3, e16,m8,tu,mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a3, e16,m8,ta,mu +; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: ret + %m = load <64 x half>, <64 x half>* %m_ptr + %mask = fcmp oeq <64 x half> %m, zeroinitializer + %load = call <64 x half> @llvm.masked.load.v64f16(<64 x half>* %a, i32 8, <64 x i1> %mask, <64 x half> undef) + store <64 x half> %load, <64 x half>* %res_ptr + ret void +} +declare <64 x half> @llvm.masked.load.v64f16(<64 x half>*, i32, <64 x i1>, <64 x half>) + +define void @masked_load_v64f32(<64 x float>* %a, <64 x float>* %m_ptr, <64 x float>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v64f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, a1, 128 +; CHECK-NEXT: addi a4, zero, 32 +; CHECK-NEXT: vsetvli a5, a4, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vle32.v v16, (a3) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v8, ft0 +; CHECK-NEXT: vmfeq.vf v26, v16, ft0 +; CHECK-NEXT: vsetvli a1, a4, e32,m8,tu,mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: vle32.v v16, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a4, e32,m8,ta,mu +; CHECK-NEXT: vse32.v v8, (a2) +; CHECK-NEXT: addi a0, a2, 128 +; CHECK-NEXT: vse32.v v16, (a0) +; CHECK-NEXT: ret + %m = load <64 x float>, <64 x float>* %m_ptr + %mask = fcmp oeq <64 x float> %m, zeroinitializer + %load = call <64 x float> @llvm.masked.load.v64f32(<64 x float>* %a, i32 8, <64 x i1> %mask, <64 x float> undef) + store <64 x float> %load, <64 x float>* %res_ptr + ret void +} +declare <64 x float> @llvm.masked.load.v64f32(<64 x float>*, i32, <64 x i1>, <64 x float>) + +define void @masked_load_v128f16(<128 x half>* %a, <128 x half>* %m_ptr, <128 x half>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v128f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, a1, 128 +; CHECK-NEXT: addi a4, zero, 64 +; 
CHECK-NEXT: vsetvli a5, a4, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vle16.v v16, (a3) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v8, ft0 +; CHECK-NEXT: vmfeq.vf v26, v16, ft0 +; CHECK-NEXT: vsetvli a1, a4, e16,m8,tu,mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: vle16.v v16, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a4, e16,m8,ta,mu +; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: addi a0, a2, 128 +; CHECK-NEXT: vse16.v v16, (a0) +; CHECK-NEXT: ret + %m = load <128 x half>, <128 x half>* %m_ptr + %mask = fcmp oeq <128 x half> %m, zeroinitializer + %load = call <128 x half> @llvm.masked.load.v128f16(<128 x half>* %a, i32 8, <128 x i1> %mask, <128 x half> undef) + store <128 x half> %load, <128 x half>* %res_ptr + ret void +} +declare <128 x half> @llvm.masked.load.v128f16(<128 x half>*, i32, <128 x i1>, <128 x half>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll @@ -0,0 +1,628 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +define void @masked_load_v1i8(<1 x i8>* %a, <1 x i8>* %m_ptr, <1 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle8.v v25, (a0), v0.t +; CHECK-NEXT: vse8.v v25, (a2) +; CHECK-NEXT: ret + %m = load <1 x i8>, <1 x i8>* %m_ptr + %mask = icmp eq <1 x i8> %m, zeroinitializer + %load = call <1 x i8> @llvm.masked.load.v1i8(<1 x i8>* %a, i32 8, <1 x i1> %mask, <1 x i8> undef) + store <1 x i8> %load, <1 x i8>* %res_ptr + ret void +} +declare <1 x i8> @llvm.masked.load.v1i8(<1 x i8>*, i32, <1 x i1>, <1 x i8>) + +define void @masked_load_v1i16(<1 x i16>* %a, <1 x i16>* %m_ptr, <1 x i16>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle16.v v25, (a0), v0.t +; CHECK-NEXT: vse16.v v25, (a2) +; CHECK-NEXT: ret + %m = load <1 x i16>, <1 x i16>* %m_ptr + %mask = icmp eq <1 x i16> %m, zeroinitializer + %load = call <1 x i16> @llvm.masked.load.v1i16(<1 x i16>* %a, i32 8, <1 x i1> %mask, <1 x i16> undef) + store <1 x i16> %load, <1 x i16>* %res_ptr + ret void +} +declare <1 x i16> @llvm.masked.load.v1i16(<1 x i16>*, i32, <1 x i1>, <1 x i16>) + +define void @masked_load_v1i32(<1 x i32>* %a, <1 x i32>* %m_ptr, <1 x i32>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle32.v v25, (a0), v0.t +; CHECK-NEXT: vse32.v v25, (a2) +; CHECK-NEXT: ret + %m = load <1 x i32>, <1 x i32>* %m_ptr + %mask = icmp eq <1 x i32> %m, zeroinitializer + %load = call <1 x i32> @llvm.masked.load.v1i32(<1 x i32>* %a, i32 8, <1 x i1> %mask, <1 x i32> undef) + store <1 x i32> %load, 
<1 x i32>* %res_ptr + ret void +} +declare <1 x i32> @llvm.masked.load.v1i32(<1 x i32>*, i32, <1 x i1>, <1 x i32>) + +define void @masked_load_v1i64(<1 x i64>* %a, <1 x i64>* %m_ptr, <1 x i64>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a1) +; RV32-NEXT: vsetivli a1, 2, e32,m1,ta,mu +; RV32-NEXT: vmv.v.i v26, 0 +; RV32-NEXT: vsetivli a1, 1, e64,m1,ta,mu +; RV32-NEXT: vmseq.vv v0, v25, v26 +; RV32-NEXT: vle64.v v25, (a0), v0.t +; RV32-NEXT: vse64.v v25, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a1) +; RV64-NEXT: vmseq.vi v0, v25, 0 +; RV64-NEXT: vle64.v v25, (a0), v0.t +; RV64-NEXT: vse64.v v25, (a2) +; RV64-NEXT: ret + %m = load <1 x i64>, <1 x i64>* %m_ptr + %mask = icmp eq <1 x i64> %m, zeroinitializer + %load = call <1 x i64> @llvm.masked.load.v1i64(<1 x i64>* %a, i32 8, <1 x i1> %mask, <1 x i64> undef) + store <1 x i64> %load, <1 x i64>* %res_ptr + ret void +} +declare <1 x i64> @llvm.masked.load.v1i64(<1 x i64>*, i32, <1 x i1>, <1 x i64>) + +define void @masked_load_v2i8(<2 x i8>* %a, <2 x i8>* %m_ptr, <2 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle8.v v25, (a0), v0.t +; CHECK-NEXT: vse8.v v25, (a2) +; CHECK-NEXT: ret + %m = load <2 x i8>, <2 x i8>* %m_ptr + %mask = icmp eq <2 x i8> %m, zeroinitializer + %load = call <2 x i8> @llvm.masked.load.v2i8(<2 x i8>* %a, i32 8, <2 x i1> %mask, <2 x i8> undef) + store <2 x i8> %load, <2 x i8>* %res_ptr + ret void +} +declare <2 x i8> @llvm.masked.load.v2i8(<2 x i8>*, i32, <2 x i1>, <2 x i8>) + +define void @masked_load_v2i16(<2 x i16>* %a, <2 x i16>* %m_ptr, <2 x i16>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle16.v v25, (a0), v0.t +; CHECK-NEXT: vse16.v v25, (a2) +; CHECK-NEXT: ret + %m = load <2 x i16>, <2 x i16>* %m_ptr + %mask = icmp eq <2 x i16> %m, zeroinitializer + %load = call <2 x i16> @llvm.masked.load.v2i16(<2 x i16>* %a, i32 8, <2 x i1> %mask, <2 x i16> undef) + store <2 x i16> %load, <2 x i16>* %res_ptr + ret void +} +declare <2 x i16> @llvm.masked.load.v2i16(<2 x i16>*, i32, <2 x i1>, <2 x i16>) + +define void @masked_load_v2i32(<2 x i32>* %a, <2 x i32>* %m_ptr, <2 x i32>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle32.v v25, (a0), v0.t +; CHECK-NEXT: vse32.v v25, (a2) +; CHECK-NEXT: ret + %m = load <2 x i32>, <2 x i32>* %m_ptr + %mask = icmp eq <2 x i32> %m, zeroinitializer + %load = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %a, i32 8, <2 x i1> %mask, <2 x i32> undef) + store <2 x i32> %load, <2 x i32>* %res_ptr + ret void +} +declare <2 x i32> @llvm.masked.load.v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>) + +define void @masked_load_v2i64(<2 x i64>* %a, <2 x i64>* %m_ptr, <2 x i64>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a1) +; RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu +; RV32-NEXT: vmv.v.i v26, 0 +; RV32-NEXT: vsetivli a1, 2, 
e64,m1,ta,mu +; RV32-NEXT: vmseq.vv v0, v25, v26 +; RV32-NEXT: vle64.v v25, (a0), v0.t +; RV32-NEXT: vse64.v v25, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a1) +; RV64-NEXT: vmseq.vi v0, v25, 0 +; RV64-NEXT: vle64.v v25, (a0), v0.t +; RV64-NEXT: vse64.v v25, (a2) +; RV64-NEXT: ret + %m = load <2 x i64>, <2 x i64>* %m_ptr + %mask = icmp eq <2 x i64> %m, zeroinitializer + %load = call <2 x i64> @llvm.masked.load.v2i64(<2 x i64>* %a, i32 8, <2 x i1> %mask, <2 x i64> undef) + store <2 x i64> %load, <2 x i64>* %res_ptr + ret void +} +declare <2 x i64> @llvm.masked.load.v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>) + +define void @masked_load_v4i8(<4 x i8>* %a, <4 x i8>* %m_ptr, <4 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle8.v v25, (a0), v0.t +; CHECK-NEXT: vse8.v v25, (a2) +; CHECK-NEXT: ret + %m = load <4 x i8>, <4 x i8>* %m_ptr + %mask = icmp eq <4 x i8> %m, zeroinitializer + %load = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %a, i32 8, <4 x i1> %mask, <4 x i8> undef) + store <4 x i8> %load, <4 x i8>* %res_ptr + ret void +} +declare <4 x i8> @llvm.masked.load.v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>) + +define void @masked_load_v4i16(<4 x i16>* %a, <4 x i16>* %m_ptr, <4 x i16>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle16.v v25, (a0), v0.t +; CHECK-NEXT: vse16.v v25, (a2) +; CHECK-NEXT: ret + %m = load <4 x i16>, <4 x i16>* %m_ptr + %mask = icmp eq <4 x i16> %m, zeroinitializer + %load = call <4 x i16> @llvm.masked.load.v4i16(<4 x i16>* %a, i32 8, <4 x i1> %mask, <4 x i16> undef) + store <4 x i16> %load, <4 x i16>* %res_ptr + ret void +} +declare <4 x i16> @llvm.masked.load.v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>) + +define void @masked_load_v4i32(<4 x i32>* %a, <4 x i32>* %m_ptr, <4 x i32>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle32.v v25, (a0), v0.t +; CHECK-NEXT: vse32.v v25, (a2) +; CHECK-NEXT: ret + %m = load <4 x i32>, <4 x i32>* %m_ptr + %mask = icmp eq <4 x i32> %m, zeroinitializer + %load = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %a, i32 8, <4 x i1> %mask, <4 x i32> undef) + store <4 x i32> %load, <4 x i32>* %res_ptr + ret void +} +declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>) + +define void @masked_load_v4i64(<4 x i64>* %a, <4 x i64>* %m_ptr, <4 x i64>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV32-NEXT: vle64.v v26, (a1) +; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu +; RV32-NEXT: vmv.v.i v28, 0 +; RV32-NEXT: vsetivli a1, 4, e64,m2,ta,mu +; RV32-NEXT: vmseq.vv v0, v26, v28 +; RV32-NEXT: vle64.v v26, (a0), v0.t +; RV32-NEXT: vse64.v v26, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV64-NEXT: vle64.v v26, (a1) +; RV64-NEXT: vmseq.vi v0, v26, 0 +; RV64-NEXT: vle64.v v26, (a0), v0.t +; RV64-NEXT: vse64.v v26, (a2) +; RV64-NEXT: ret + %m = load <4 x i64>, <4 x i64>* %m_ptr + %mask = icmp eq <4 x 
i64> %m, zeroinitializer + %load = call <4 x i64> @llvm.masked.load.v4i64(<4 x i64>* %a, i32 8, <4 x i1> %mask, <4 x i64> undef) + store <4 x i64> %load, <4 x i64>* %res_ptr + ret void +} +declare <4 x i64> @llvm.masked.load.v4i64(<4 x i64>*, i32, <4 x i1>, <4 x i64>) + +define void @masked_load_v8i8(<8 x i8>* %a, <8 x i8>* %m_ptr, <8 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle8.v v25, (a0), v0.t +; CHECK-NEXT: vse8.v v25, (a2) +; CHECK-NEXT: ret + %m = load <8 x i8>, <8 x i8>* %m_ptr + %mask = icmp eq <8 x i8> %m, zeroinitializer + %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %a, i32 8, <8 x i1> %mask, <8 x i8> undef) + store <8 x i8> %load, <8 x i8>* %res_ptr + ret void +} +declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>) + +define void @masked_load_v8i16(<8 x i16>* %a, <8 x i16>* %m_ptr, <8 x i16>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle16.v v25, (a0), v0.t +; CHECK-NEXT: vse16.v v25, (a2) +; CHECK-NEXT: ret + %m = load <8 x i16>, <8 x i16>* %m_ptr + %mask = icmp eq <8 x i16> %m, zeroinitializer + %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %a, i32 8, <8 x i1> %mask, <8 x i16> undef) + store <8 x i16> %load, <8 x i16>* %res_ptr + ret void +} +declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>) + +define void @masked_load_v8i32(<8 x i32>* %a, <8 x i32>* %m_ptr, <8 x i32>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu +; CHECK-NEXT: vle32.v v26, (a1) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vle32.v v26, (a0), v0.t +; CHECK-NEXT: vse32.v v26, (a2) +; CHECK-NEXT: ret + %m = load <8 x i32>, <8 x i32>* %m_ptr + %mask = icmp eq <8 x i32> %m, zeroinitializer + %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %a, i32 8, <8 x i1> %mask, <8 x i32> undef) + store <8 x i32> %load, <8 x i32>* %res_ptr + ret void +} +declare <8 x i32> @llvm.masked.load.v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>) + +define void @masked_load_v8i64(<8 x i64>* %a, <8 x i64>* %m_ptr, <8 x i64>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV32-NEXT: vle64.v v28, (a1) +; RV32-NEXT: vsetivli a1, 16, e32,m4,ta,mu +; RV32-NEXT: vmv.v.i v8, 0 +; RV32-NEXT: vsetivli a1, 8, e64,m4,ta,mu +; RV32-NEXT: vmseq.vv v0, v28, v8 +; RV32-NEXT: vle64.v v28, (a0), v0.t +; RV32-NEXT: vse64.v v28, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV64-NEXT: vle64.v v28, (a1) +; RV64-NEXT: vmseq.vi v0, v28, 0 +; RV64-NEXT: vle64.v v28, (a0), v0.t +; RV64-NEXT: vse64.v v28, (a2) +; RV64-NEXT: ret + %m = load <8 x i64>, <8 x i64>* %m_ptr + %mask = icmp eq <8 x i64> %m, zeroinitializer + %load = call <8 x i64> @llvm.masked.load.v8i64(<8 x i64>* %a, i32 8, <8 x i1> %mask, <8 x i64> undef) + store <8 x i64> %load, <8 x i64>* %res_ptr + ret void +} +declare <8 x i64> @llvm.masked.load.v8i64(<8 x i64>*, i32, <8 x i1>, <8 x i64>) + +define void @masked_load_v16i8(<16 x i8>* %a, <16 x i8>* %m_ptr, <16 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, 
e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a1) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vle8.v v25, (a0), v0.t +; CHECK-NEXT: vse8.v v25, (a2) +; CHECK-NEXT: ret + %m = load <16 x i8>, <16 x i8>* %m_ptr + %mask = icmp eq <16 x i8> %m, zeroinitializer + %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %a, i32 8, <16 x i1> %mask, <16 x i8> undef) + store <16 x i8> %load, <16 x i8>* %res_ptr + ret void +} +declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>) + +define void @masked_load_v16i16(<16 x i16>* %a, <16 x i16>* %m_ptr, <16 x i16>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a1) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vle16.v v26, (a0), v0.t +; CHECK-NEXT: vse16.v v26, (a2) +; CHECK-NEXT: ret + %m = load <16 x i16>, <16 x i16>* %m_ptr + %mask = icmp eq <16 x i16> %m, zeroinitializer + %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %a, i32 8, <16 x i1> %mask, <16 x i16> undef) + store <16 x i16> %load, <16 x i16>* %res_ptr + ret void +} +declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>) + +define void @masked_load_v16i32(<16 x i32>* %a, <16 x i32>* %m_ptr, <16 x i32>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vle32.v v28, (a0), v0.t +; CHECK-NEXT: vse32.v v28, (a2) +; CHECK-NEXT: ret + %m = load <16 x i32>, <16 x i32>* %m_ptr + %mask = icmp eq <16 x i32> %m, zeroinitializer + %load = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %a, i32 8, <16 x i1> %mask, <16 x i32> undef) + store <16 x i32> %load, <16 x i32>* %res_ptr + ret void +} +declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>) + +define void @masked_load_v16i64(<16 x i64>* %a, <16 x i64>* %m_ptr, <16 x i64>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v16i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v8, (a1) +; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; RV32-NEXT: vmv.v.i v16, 0 +; RV32-NEXT: vsetivli a1, 16, e64,m8,ta,mu +; RV32-NEXT: vmseq.vv v0, v8, v16 +; RV32-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV32-NEXT: vle64.v v8, (a0), v0.t +; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV32-NEXT: vse64.v v8, (a2) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v16i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV64-NEXT: vle64.v v8, (a1) +; RV64-NEXT: vmseq.vi v0, v8, 0 +; RV64-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV64-NEXT: vle64.v v8, (a0), v0.t +; RV64-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV64-NEXT: vse64.v v8, (a2) +; RV64-NEXT: ret + %m = load <16 x i64>, <16 x i64>* %m_ptr + %mask = icmp eq <16 x i64> %m, zeroinitializer + %load = call <16 x i64> @llvm.masked.load.v16i64(<16 x i64>* %a, i32 8, <16 x i1> %mask, <16 x i64> undef) + store <16 x i64> %load, <16 x i64>* %res_ptr + ret void +} +declare <16 x i64> @llvm.masked.load.v16i64(<16 x i64>*, i32, <16 x i1>, <16 x i64>) + +define void @masked_load_v32i8(<32 x i8>* %a, <32 x i8>* %m_ptr, <32 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a1) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vle8.v v26, (a0), v0.t +; CHECK-NEXT: 
vse8.v v26, (a2) +; CHECK-NEXT: ret + %m = load <32 x i8>, <32 x i8>* %m_ptr + %mask = icmp eq <32 x i8> %m, zeroinitializer + %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %a, i32 8, <32 x i1> %mask, <32 x i8> undef) + store <32 x i8> %load, <32 x i8>* %res_ptr + ret void +} +declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>) + +define void @masked_load_v32i16(<32 x i16>* %a, <32 x i16>* %m_ptr, <32 x i16>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a1) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vle16.v v28, (a0), v0.t +; CHECK-NEXT: vse16.v v28, (a2) +; CHECK-NEXT: ret + %m = load <32 x i16>, <32 x i16>* %m_ptr + %mask = icmp eq <32 x i16> %m, zeroinitializer + %load = call <32 x i16> @llvm.masked.load.v32i16(<32 x i16>* %a, i32 8, <32 x i1> %mask, <32 x i16> undef) + store <32 x i16> %load, <32 x i16>* %res_ptr + ret void +} +declare <32 x i16> @llvm.masked.load.v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>) + +define void @masked_load_v32i32(<32 x i32>* %a, <32 x i32>* %m_ptr, <32 x i32>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v32i32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a4, a3, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vsetvli a1, a3, e32,m8,tu,mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a3, e32,m8,ta,mu +; CHECK-NEXT: vse32.v v8, (a2) +; CHECK-NEXT: ret + %m = load <32 x i32>, <32 x i32>* %m_ptr + %mask = icmp eq <32 x i32> %m, zeroinitializer + %load = call <32 x i32> @llvm.masked.load.v32i32(<32 x i32>* %a, i32 8, <32 x i1> %mask, <32 x i32> undef) + store <32 x i32> %load, <32 x i32>* %res_ptr + ret void +} +declare <32 x i32> @llvm.masked.load.v32i32(<32 x i32>*, i32, <32 x i1>, <32 x i32>) + +define void @masked_load_v32i64(<32 x i64>* %a, <32 x i64>* %m_ptr, <32 x i64>* %res_ptr) nounwind { +; RV32-LABEL: masked_load_v32i64: +; RV32: # %bb.0: +; RV32-NEXT: addi a3, a1, 128 +; RV32-NEXT: vsetivli a4, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v8, (a3) +; RV32-NEXT: vle64.v v16, (a1) +; RV32-NEXT: addi a1, zero, 32 +; RV32-NEXT: vsetvli a1, a1, e32,m8,ta,mu +; RV32-NEXT: vmv.v.i v24, 0 +; RV32-NEXT: vsetivli a1, 16, e64,m8,ta,mu +; RV32-NEXT: vmseq.vv v0, v16, v24 +; RV32-NEXT: vmseq.vv v16, v8, v24 +; RV32-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV32-NEXT: vle64.v v8, (a0), v0.t +; RV32-NEXT: addi a0, a0, 128 +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: vle64.v v16, (a0), v0.t +; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV32-NEXT: vse64.v v8, (a2) +; RV32-NEXT: addi a0, a2, 128 +; RV32-NEXT: vse64.v v16, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: masked_load_v32i64: +; RV64: # %bb.0: +; RV64-NEXT: addi a3, a1, 128 +; RV64-NEXT: vsetivli a4, 16, e64,m8,ta,mu +; RV64-NEXT: vle64.v v8, (a1) +; RV64-NEXT: vle64.v v16, (a3) +; RV64-NEXT: vmseq.vi v0, v8, 0 +; RV64-NEXT: vmseq.vi v26, v16, 0 +; RV64-NEXT: vsetivli a1, 16, e64,m8,tu,mu +; RV64-NEXT: vle64.v v8, (a0), v0.t +; RV64-NEXT: addi a0, a0, 128 +; RV64-NEXT: vmv1r.v v0, v26 +; RV64-NEXT: vle64.v v16, (a0), v0.t +; RV64-NEXT: vsetivli a0, 16, e64,m8,ta,mu +; RV64-NEXT: vse64.v v8, (a2) +; RV64-NEXT: addi a0, a2, 128 +; RV64-NEXT: vse64.v v16, (a0) +; RV64-NEXT: ret + %m = load <32 x i64>, <32 x i64>* %m_ptr + %mask = icmp eq <32 x i64> %m, zeroinitializer + %load = call <32 x i64> @llvm.masked.load.v32i64(<32 x i64>* %a, i32 
8, <32 x i1> %mask, <32 x i64> undef) + store <32 x i64> %load, <32 x i64>* %res_ptr + ret void +} +declare <32 x i64> @llvm.masked.load.v32i64(<32 x i64>*, i32, <32 x i1>, <32 x i64>) + +define void @masked_load_v64i8(<64 x i8>* %a, <64 x i8>* %m_ptr, <64 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: vsetvli a3, a3, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a1) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vle8.v v28, (a0), v0.t +; CHECK-NEXT: vse8.v v28, (a2) +; CHECK-NEXT: ret + %m = load <64 x i8>, <64 x i8>* %m_ptr + %mask = icmp eq <64 x i8> %m, zeroinitializer + %load = call <64 x i8> @llvm.masked.load.v64i8(<64 x i8>* %a, i32 8, <64 x i1> %mask, <64 x i8> undef) + store <64 x i8> %load, <64 x i8>* %res_ptr + ret void +} +declare <64 x i8> @llvm.masked.load.v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>) + +define void @masked_load_v64i16(<64 x i16>* %a, <64 x i16>* %m_ptr, <64 x i16>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v64i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: vsetvli a4, a3, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vsetvli a1, a3, e16,m8,tu,mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a3, e16,m8,ta,mu +; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: ret + %m = load <64 x i16>, <64 x i16>* %m_ptr + %mask = icmp eq <64 x i16> %m, zeroinitializer + %load = call <64 x i16> @llvm.masked.load.v64i16(<64 x i16>* %a, i32 8, <64 x i1> %mask, <64 x i16> undef) + store <64 x i16> %load, <64 x i16>* %res_ptr + ret void +} +declare <64 x i16> @llvm.masked.load.v64i16(<64 x i16>*, i32, <64 x i1>, <64 x i16>) + +define void @masked_load_v64i32(<64 x i32>* %a, <64 x i32>* %m_ptr, <64 x i32>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v64i32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, a1, 128 +; CHECK-NEXT: addi a4, zero, 32 +; CHECK-NEXT: vsetvli a5, a4, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vle32.v v16, (a3) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vmseq.vi v26, v16, 0 +; CHECK-NEXT: vsetvli a1, a4, e32,m8,tu,mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: vle32.v v16, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a4, e32,m8,ta,mu +; CHECK-NEXT: vse32.v v8, (a2) +; CHECK-NEXT: addi a0, a2, 128 +; CHECK-NEXT: vse32.v v16, (a0) +; CHECK-NEXT: ret + %m = load <64 x i32>, <64 x i32>* %m_ptr + %mask = icmp eq <64 x i32> %m, zeroinitializer + %load = call <64 x i32> @llvm.masked.load.v64i32(<64 x i32>* %a, i32 8, <64 x i1> %mask, <64 x i32> undef) + store <64 x i32> %load, <64 x i32>* %res_ptr + ret void +} +declare <64 x i32> @llvm.masked.load.v64i32(<64 x i32>*, i32, <64 x i1>, <64 x i32>) + +define void @masked_load_v128i8(<128 x i8>* %a, <128 x i8>* %m_ptr, <128 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v128i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: vsetvli a4, a3, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vsetvli a1, a3, e8,m8,tu,mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a3, e8,m8,ta,mu +; CHECK-NEXT: vse8.v v8, (a2) +; CHECK-NEXT: ret + %m = load <128 x i8>, <128 x i8>* %m_ptr + %mask = icmp eq <128 x i8> %m, zeroinitializer + %load = call <128 x i8> @llvm.masked.load.v128i8(<128 x i8>* %a, i32 8, <128 x i1> %mask, <128 x i8> undef) + store <128 x i8> %load, <128 x i8>* %res_ptr + 
ret void +} +declare <128 x i8> @llvm.masked.load.v128i8(<128 x i8>*, i32, <128 x i1>, <128 x i8>) + +define void @masked_load_v256i8(<256 x i8>* %a, <256 x i8>* %m_ptr, <256 x i8>* %res_ptr) nounwind { +; CHECK-LABEL: masked_load_v256i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, a1, 128 +; CHECK-NEXT: addi a4, zero, 128 +; CHECK-NEXT: vsetvli a5, a4, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vle8.v v16, (a3) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vmseq.vi v26, v16, 0 +; CHECK-NEXT: vsetvli a1, a4, e8,m8,tu,mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: vle8.v v16, (a0), v0.t +; CHECK-NEXT: vsetvli a0, a4, e8,m8,ta,mu +; CHECK-NEXT: vse8.v v8, (a2) +; CHECK-NEXT: addi a0, a2, 128 +; CHECK-NEXT: vse8.v v16, (a0) +; CHECK-NEXT: ret + %m = load <256 x i8>, <256 x i8>* %m_ptr + %mask = icmp eq <256 x i8> %m, zeroinitializer + %load = call <256 x i8> @llvm.masked.load.v256i8(<256 x i8>* %a, i32 8, <256 x i1> %mask, <256 x i8> undef) + store <256 x i8> %load, <256 x i8>* %res_ptr + ret void +} +declare <256 x i8> @llvm.masked.load.v256i8(<256 x i8>*, i32, <256 x i1>, <256 x i8>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll @@ -0,0 +1,478 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-v,+f,+d,+experimental-zfh -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v,+f,+d,+experimental-zfh -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +define void @masked_store_v1f16(<1 x half>* %val_ptr, <1 x half>* %a, <1 x half>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <1 x half>, <1 x half>* %m_ptr + %mask = fcmp oeq <1 x half> %m, zeroinitializer + %val = load <1 x half>, <1 x half>* %val_ptr + call void @llvm.masked.store.v1f16.p0v1f16(<1 x half> %val, <1 x half>* %a, i32 8, <1 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v1f16.p0v1f16(<1 x half>, <1 x half>*, i32, <1 x i1>) + +define void @masked_store_v1f32(<1 x float>* %val_ptr, <1 x float>* %a, <1 x float>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a2) +; CHECK-NEXT: vle32.v v26, (a0) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vse32.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <1 x float>, <1 x float>* %m_ptr + %mask = fcmp oeq <1 x float> %m, zeroinitializer + %val = load <1 x float>, <1 x float>* %val_ptr + call void @llvm.masked.store.v1f32.p0v1f32(<1 x float> %val, <1 x float>* %a, i32 8, <1 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v1f32.p0v1f32(<1 x float>, <1 x float>*, i32, <1 x i1>) + +define void @masked_store_v1f64(<1 x double>* %val_ptr, <1 x double>* %a, <1 x double>* %m_ptr) nounwind { +; RV32-LABEL: 
masked_store_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a2) +; RV32-NEXT: vle64.v v26, (a0) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v25, ft0 +; RV32-NEXT: vse64.v v26, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a2) +; RV64-NEXT: vle64.v v26, (a0) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v25, ft0 +; RV64-NEXT: vse64.v v26, (a1), v0.t +; RV64-NEXT: ret + %m = load <1 x double>, <1 x double>* %m_ptr + %mask = fcmp oeq <1 x double> %m, zeroinitializer + %val = load <1 x double>, <1 x double>* %val_ptr + call void @llvm.masked.store.v1f64.p0v1f64(<1 x double> %val, <1 x double>* %a, i32 8, <1 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v1f64.p0v1f64(<1 x double>, <1 x double>*, i32, <1 x i1>) + +define void @masked_store_v2f16(<2 x half>* %val_ptr, <2 x half>* %a, <2 x half>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <2 x half>, <2 x half>* %m_ptr + %mask = fcmp oeq <2 x half> %m, zeroinitializer + %val = load <2 x half>, <2 x half>* %val_ptr + call void @llvm.masked.store.v2f16.p0v2f16(<2 x half> %val, <2 x half>* %a, i32 8, <2 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v2f16.p0v2f16(<2 x half>, <2 x half>*, i32, <2 x i1>) + +define void @masked_store_v2f32(<2 x float>* %val_ptr, <2 x float>* %a, <2 x float>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a2) +; CHECK-NEXT: vle32.v v26, (a0) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vse32.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <2 x float>, <2 x float>* %m_ptr + %mask = fcmp oeq <2 x float> %m, zeroinitializer + %val = load <2 x float>, <2 x float>* %val_ptr + call void @llvm.masked.store.v2f32.p0v2f32(<2 x float> %val, <2 x float>* %a, i32 8, <2 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v2f32.p0v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>) + +define void @masked_store_v2f64(<2 x double>* %val_ptr, <2 x double>* %a, <2 x double>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a2) +; RV32-NEXT: vle64.v v26, (a0) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v25, ft0 +; RV32-NEXT: vse64.v v26, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a2) +; RV64-NEXT: vle64.v v26, (a0) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v25, ft0 +; RV64-NEXT: vse64.v v26, (a1), v0.t +; RV64-NEXT: ret + %m = load <2 x double>, <2 x double>* %m_ptr + %mask = fcmp oeq <2 x double> %m, zeroinitializer + %val = load <2 x double>, <2 x double>* %val_ptr + call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %a, i32 8, <2 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>) + +define void @masked_store_v4f16(<4 x half>* %val_ptr, <4 x half>* %a, <4 x half>* %m_ptr) nounwind { 
+; CHECK-LABEL: masked_store_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <4 x half>, <4 x half>* %m_ptr + %mask = fcmp oeq <4 x half> %m, zeroinitializer + %val = load <4 x half>, <4 x half>* %val_ptr + call void @llvm.masked.store.v4f16.p0v4f16(<4 x half> %val, <4 x half>* %a, i32 8, <4 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v4f16.p0v4f16(<4 x half>, <4 x half>*, i32, <4 x i1>) + +define void @masked_store_v4f32(<4 x float>* %val_ptr, <4 x float>* %a, <4 x float>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a2) +; CHECK-NEXT: vle32.v v26, (a0) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vse32.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <4 x float>, <4 x float>* %m_ptr + %mask = fcmp oeq <4 x float> %m, zeroinitializer + %val = load <4 x float>, <4 x float>* %val_ptr + call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %a, i32 8, <4 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>) + +define void @masked_store_v4f64(<4 x double>* %val_ptr, <4 x double>* %a, <4 x double>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV32-NEXT: vle64.v v26, (a2) +; RV32-NEXT: vle64.v v28, (a0) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v26, ft0 +; RV32-NEXT: vse64.v v28, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV64-NEXT: vle64.v v26, (a2) +; RV64-NEXT: vle64.v v28, (a0) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v26, ft0 +; RV64-NEXT: vse64.v v28, (a1), v0.t +; RV64-NEXT: ret + %m = load <4 x double>, <4 x double>* %m_ptr + %mask = fcmp oeq <4 x double> %m, zeroinitializer + %val = load <4 x double>, <4 x double>* %val_ptr + call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %val, <4 x double>* %a, i32 8, <4 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v4f64.p0v4f64(<4 x double>, <4 x double>*, i32, <4 x i1>) + +define void @masked_store_v8f16(<8 x half>* %val_ptr, <8 x half>* %a, <8 x half>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v25, ft0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <8 x half>, <8 x half>* %m_ptr + %mask = fcmp oeq <8 x half> %m, zeroinitializer + %val = load <8 x half>, <8 x half>* %val_ptr + call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %val, <8 x half>* %a, i32 8, <8 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>) + +define void @masked_store_v8f32(<8 x float>* %val_ptr, <8 x float>* %a, <8 x float>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu +; CHECK-NEXT: vle32.v v26, (a2) +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v26, ft0 +; CHECK-NEXT: vse32.v v28, (a1), v0.t +; 
CHECK-NEXT: ret + %m = load <8 x float>, <8 x float>* %m_ptr + %mask = fcmp oeq <8 x float> %m, zeroinitializer + %val = load <8 x float>, <8 x float>* %val_ptr + call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> %val, <8 x float>* %a, i32 8, <8 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v8f32.p0v8f32(<8 x float>, <8 x float>*, i32, <8 x i1>) + +define void @masked_store_v8f64(<8 x double>* %val_ptr, <8 x double>* %a, <8 x double>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV32-NEXT: vle64.v v28, (a2) +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v28, ft0 +; RV32-NEXT: vse64.v v8, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV64-NEXT: vle64.v v28, (a2) +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v0, v28, ft0 +; RV64-NEXT: vse64.v v8, (a1), v0.t +; RV64-NEXT: ret + %m = load <8 x double>, <8 x double>* %m_ptr + %mask = fcmp oeq <8 x double> %m, zeroinitializer + %val = load <8 x double>, <8 x double>* %val_ptr + call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %val, <8 x double>* %a, i32 8, <8 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>) + +define void @masked_store_v16f16(<16 x half>* %val_ptr, <16 x half>* %a, <16 x half>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a2) +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v26, ft0 +; CHECK-NEXT: vse16.v v28, (a1), v0.t +; CHECK-NEXT: ret + %m = load <16 x half>, <16 x half>* %m_ptr + %mask = fcmp oeq <16 x half> %m, zeroinitializer + %val = load <16 x half>, <16 x half>* %val_ptr + call void @llvm.masked.store.v16f16.p0v16f16(<16 x half> %val, <16 x half>* %a, i32 8, <16 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v16f16.p0v16f16(<16 x half>, <16 x half>*, i32, <16 x i1>) + +define void @masked_store_v16f32(<16 x float>* %val_ptr, <16 x float>* %a, <16 x float>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a2) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v28, ft0 +; CHECK-NEXT: vse32.v v8, (a1), v0.t +; CHECK-NEXT: ret + %m = load <16 x float>, <16 x float>* %m_ptr + %mask = fcmp oeq <16 x float> %m, zeroinitializer + %val = load <16 x float>, <16 x float>* %val_ptr + call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %val, <16 x float>* %a, i32 8, <16 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>) + +define void @masked_store_v16f64(<16 x double>* %val_ptr, <16 x double>* %a, <16 x double>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v16f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v8, (a2) +; RV32-NEXT: vle64.v v16, (a0) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v0, v8, ft0 +; RV32-NEXT: vse64.v v16, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v16f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV64-NEXT: vle64.v v8, (a2) +; RV64-NEXT: vle64.v v16, (a0) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf 
v0, v8, ft0 +; RV64-NEXT: vse64.v v16, (a1), v0.t +; RV64-NEXT: ret + %m = load <16 x double>, <16 x double>* %m_ptr + %mask = fcmp oeq <16 x double> %m, zeroinitializer + %val = load <16 x double>, <16 x double>* %val_ptr + call void @llvm.masked.store.v16f64.p0v16f64(<16 x double> %val, <16 x double>* %a, i32 8, <16 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v16f64.p0v16f64(<16 x double>, <16 x double>*, i32, <16 x i1>) + +define void @masked_store_v32f16(<32 x half>* %val_ptr, <32 x half>* %a, <32 x half>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a2) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v28, ft0 +; CHECK-NEXT: vse16.v v8, (a1), v0.t +; CHECK-NEXT: ret + %m = load <32 x half>, <32 x half>* %m_ptr + %mask = fcmp oeq <32 x half> %m, zeroinitializer + %val = load <32 x half>, <32 x half>* %val_ptr + call void @llvm.masked.store.v32f16.p0v32f16(<32 x half> %val, <32 x half>* %a, i32 8, <32 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v32f16.p0v32f16(<32 x half>, <32 x half>*, i32, <32 x i1>) + +define void @masked_store_v32f32(<32 x float>* %val_ptr, <32 x float>* %a, <32 x float>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v32f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a2) +; CHECK-NEXT: vle32.v v16, (a0) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v8, ft0 +; CHECK-NEXT: vse32.v v16, (a1), v0.t +; CHECK-NEXT: ret + %m = load <32 x float>, <32 x float>* %m_ptr + %mask = fcmp oeq <32 x float> %m, zeroinitializer + %val = load <32 x float>, <32 x float>* %val_ptr + call void @llvm.masked.store.v32f32.p0v32f32(<32 x float> %val, <32 x float>* %a, i32 8, <32 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v32f32.p0v32f32(<32 x float>, <32 x float>*, i32, <32 x i1>) + +define void @masked_store_v32f64(<32 x double>* %val_ptr, <32 x double>* %a, <32 x double>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v32f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV32-NEXT: addi a3, a2, 128 +; RV32-NEXT: vle64.v v8, (a3) +; RV32-NEXT: vle64.v v16, (a2) +; RV32-NEXT: fcvt.d.w ft0, zero +; RV32-NEXT: vmfeq.vf v1, v8, ft0 +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: addi a0, a0, 128 +; RV32-NEXT: vle64.v v24, (a0) +; RV32-NEXT: vmfeq.vf v0, v16, ft0 +; RV32-NEXT: vse64.v v8, (a1), v0.t +; RV32-NEXT: addi a0, a1, 128 +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vse64.v v24, (a0), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v32f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV64-NEXT: addi a3, a2, 128 +; RV64-NEXT: vle64.v v8, (a3) +; RV64-NEXT: vle64.v v16, (a2) +; RV64-NEXT: fmv.d.x ft0, zero +; RV64-NEXT: vmfeq.vf v1, v8, ft0 +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: addi a0, a0, 128 +; RV64-NEXT: vle64.v v24, (a0) +; RV64-NEXT: vmfeq.vf v0, v16, ft0 +; RV64-NEXT: vse64.v v8, (a1), v0.t +; RV64-NEXT: addi a0, a1, 128 +; RV64-NEXT: vmv1r.v v0, v1 +; RV64-NEXT: vse64.v v24, (a0), v0.t +; RV64-NEXT: ret + %m = load <32 x double>, <32 x double>* %m_ptr + %mask = fcmp oeq <32 x double> %m, zeroinitializer + %val = load <32 x double>, <32 x double>* %val_ptr + call void @llvm.masked.store.v32f64.p0v32f64(<32 x double> %val, <32 x double>* %a, i32 8, <32 x i1> %mask) + ret void +} +declare void
@llvm.masked.store.v32f64.p0v32f64(<32 x double>, <32 x double>*, i32, <32 x i1>) + +define void @masked_store_v64f16(<64 x half>* %val_ptr, <64 x half>* %a, <64 x half>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v64f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: vsetvli a3, a3, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a2) +; CHECK-NEXT: vle16.v v16, (a0) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v0, v8, ft0 +; CHECK-NEXT: vse16.v v16, (a1), v0.t +; CHECK-NEXT: ret + %m = load <64 x half>, <64 x half>* %m_ptr + %mask = fcmp oeq <64 x half> %m, zeroinitializer + %val = load <64 x half>, <64 x half>* %val_ptr + call void @llvm.masked.store.v64f16.p0v64f16(<64 x half> %val, <64 x half>* %a, i32 8, <64 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v64f16.p0v64f16(<64 x half>, <64 x half>*, i32, <64 x i1>) + +define void @masked_store_v64f32(<64 x float>* %val_ptr, <64 x float>* %a, <64 x float>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v64f32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e32,m8,ta,mu +; CHECK-NEXT: addi a3, a2, 128 +; CHECK-NEXT: vle32.v v8, (a3) +; CHECK-NEXT: vle32.v v16, (a2) +; CHECK-NEXT: fmv.w.x ft0, zero +; CHECK-NEXT: vmfeq.vf v1, v8, ft0 +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vmfeq.vf v0, v16, ft0 +; CHECK-NEXT: vse32.v v8, (a1), v0.t +; CHECK-NEXT: addi a0, a1, 128 +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vse32.v v24, (a0), v0.t +; CHECK-NEXT: ret + %m = load <64 x float>, <64 x float>* %m_ptr + %mask = fcmp oeq <64 x float> %m, zeroinitializer + %val = load <64 x float>, <64 x float>* %val_ptr + call void @llvm.masked.store.v64f32.p0v64f32(<64 x float> %val, <64 x float>* %a, i32 8, <64 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v64f32.p0v64f32(<64 x float>, <64 x float>*, i32, <64 x i1>) + +define void @masked_store_v128f16(<128 x half>* %val_ptr, <128 x half>* %a, <128 x half>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v128f16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: vsetvli a3, a3, e16,m8,ta,mu +; CHECK-NEXT: addi a3, a2, 128 +; CHECK-NEXT: vle16.v v8, (a3) +; CHECK-NEXT: vle16.v v16, (a2) +; CHECK-NEXT: fmv.h.x ft0, zero +; CHECK-NEXT: vmfeq.vf v1, v8, ft0 +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vmfeq.vf v0, v16, ft0 +; CHECK-NEXT: vse16.v v8, (a1), v0.t +; CHECK-NEXT: addi a0, a1, 128 +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vse16.v v24, (a0), v0.t +; CHECK-NEXT: ret + %m = load <128 x half>, <128 x half>* %m_ptr + %mask = fcmp oeq <128 x half> %m, zeroinitializer + %val = load <128 x half>, <128 x half>* %val_ptr + call void @llvm.masked.store.v128f16.p0v128f16(<128 x half> %val, <128 x half>* %a, i32 8, <128 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v128f16.p0v128f16(<128 x half>, <128 x half>*, i32, <128 x i1>) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll @@ -0,0 +1,658 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc
-mtriple=riscv64 -mattr=+m,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +define void @masked_store_v1i8(<1 x i8>* %val_ptr, <1 x i8>* %a, <1 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a2) +; CHECK-NEXT: vle8.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse8.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <1 x i8>, <1 x i8>* %m_ptr + %mask = icmp eq <1 x i8> %m, zeroinitializer + %val = load <1 x i8>, <1 x i8>* %val_ptr + call void @llvm.masked.store.v1i8.p0v1i8(<1 x i8> %val, <1 x i8>* %a, i32 8, <1 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v1i8.p0v1i8(<1 x i8>, <1 x i8>*, i32, <1 x i1>) + +define void @masked_store_v1i16(<1 x i16>* %val_ptr, <1 x i16>* %a, <1 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <1 x i16>, <1 x i16>* %m_ptr + %mask = icmp eq <1 x i16> %m, zeroinitializer + %val = load <1 x i16>, <1 x i16>* %val_ptr + call void @llvm.masked.store.v1i16.p0v1i16(<1 x i16> %val, <1 x i16>* %a, i32 8, <1 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v1i16.p0v1i16(<1 x i16>, <1 x i16>*, i32, <1 x i1>) + +define void @masked_store_v1i32(<1 x i32>* %val_ptr, <1 x i32>* %a, <1 x i32>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 1, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a2) +; CHECK-NEXT: vle32.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse32.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <1 x i32>, <1 x i32>* %m_ptr + %mask = icmp eq <1 x i32> %m, zeroinitializer + %val = load <1 x i32>, <1 x i32>* %val_ptr + call void @llvm.masked.store.v1i32.p0v1i32(<1 x i32> %val, <1 x i32>* %a, i32 8, <1 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>, <1 x i32>*, i32, <1 x i1>) + +define void @masked_store_v1i64(<1 x i64>* %val_ptr, <1 x i64>* %a, <1 x i64>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v1i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a2) +; RV32-NEXT: vsetivli a2, 2, e32,m1,ta,mu +; RV32-NEXT: vmv.v.i v26, 0 +; RV32-NEXT: vsetivli a2, 1, e64,m1,ta,mu +; RV32-NEXT: vle64.v v27, (a0) +; RV32-NEXT: vmseq.vv v0, v25, v26 +; RV32-NEXT: vse64.v v27, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 1, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a2) +; RV64-NEXT: vle64.v v26, (a0) +; RV64-NEXT: vmseq.vi v0, v25, 0 +; RV64-NEXT: vse64.v v26, (a1), v0.t +; RV64-NEXT: ret + %m = load <1 x i64>, <1 x i64>* %m_ptr + %mask = icmp eq <1 x i64> %m, zeroinitializer + %val = load <1 x i64>, <1 x i64>* %val_ptr + call void @llvm.masked.store.v1i64.p0v1i64(<1 x i64> %val, <1 x i64>* %a, i32 8, <1 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v1i64.p0v1i64(<1 x i64>, <1 x i64>*, i32, <1 x i1>) + +define void @masked_store_v2i8(<2 x i8>* %val_ptr, <2 x i8>* %a, <2 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a2) +; CHECK-NEXT: vle8.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, 
v25, 0 +; CHECK-NEXT: vse8.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <2 x i8>, <2 x i8>* %m_ptr + %mask = icmp eq <2 x i8> %m, zeroinitializer + %val = load <2 x i8>, <2 x i8>* %val_ptr + call void @llvm.masked.store.v2i8.p0v2i8(<2 x i8> %val, <2 x i8>* %a, i32 8, <2 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v2i8.p0v2i8(<2 x i8>, <2 x i8>*, i32, <2 x i1>) + +define void @masked_store_v2i16(<2 x i16>* %val_ptr, <2 x i16>* %a, <2 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <2 x i16>, <2 x i16>* %m_ptr + %mask = icmp eq <2 x i16> %m, zeroinitializer + %val = load <2 x i16>, <2 x i16>* %val_ptr + call void @llvm.masked.store.v2i16.p0v2i16(<2 x i16> %val, <2 x i16>* %a, i32 8, <2 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v2i16.p0v2i16(<2 x i16>, <2 x i16>*, i32, <2 x i1>) + +define void @masked_store_v2i32(<2 x i32>* %val_ptr, <2 x i32>* %a, <2 x i32>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 2, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a2) +; CHECK-NEXT: vle32.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse32.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <2 x i32>, <2 x i32>* %m_ptr + %mask = icmp eq <2 x i32> %m, zeroinitializer + %val = load <2 x i32>, <2 x i32>* %val_ptr + call void @llvm.masked.store.v2i32.p0v2i32(<2 x i32> %val, <2 x i32>* %a, i32 8, <2 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>) + +define void @masked_store_v2i64(<2 x i64>* %val_ptr, <2 x i64>* %a, <2 x i64>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV32-NEXT: vle64.v v25, (a2) +; RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu +; RV32-NEXT: vmv.v.i v26, 0 +; RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu +; RV32-NEXT: vle64.v v27, (a0) +; RV32-NEXT: vmseq.vv v0, v25, v26 +; RV32-NEXT: vse64.v v27, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 2, e64,m1,ta,mu +; RV64-NEXT: vle64.v v25, (a2) +; RV64-NEXT: vle64.v v26, (a0) +; RV64-NEXT: vmseq.vi v0, v25, 0 +; RV64-NEXT: vse64.v v26, (a1), v0.t +; RV64-NEXT: ret + %m = load <2 x i64>, <2 x i64>* %m_ptr + %mask = icmp eq <2 x i64> %m, zeroinitializer + %val = load <2 x i64>, <2 x i64>* %val_ptr + call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %val, <2 x i64>* %a, i32 8, <2 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>) + +define void @masked_store_v4i8(<4 x i8>* %val_ptr, <4 x i8>* %a, <4 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a2) +; CHECK-NEXT: vle8.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse8.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <4 x i8>, <4 x i8>* %m_ptr + %mask = icmp eq <4 x i8> %m, zeroinitializer + %val = load <4 x i8>, <4 x i8>* %val_ptr + call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %val, <4 x i8>* %a, i32 8, <4 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>) + +define void @masked_store_v4i16(<4 x i16>* %val_ptr, <4 x i16>* 
%a, <4 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <4 x i16>, <4 x i16>* %m_ptr + %mask = icmp eq <4 x i16> %m, zeroinitializer + %val = load <4 x i16>, <4 x i16>* %val_ptr + call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %val, <4 x i16>* %a, i32 8, <4 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>) + +define void @masked_store_v4i32(<4 x i32>* %val_ptr, <4 x i32>* %a, <4 x i32>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu +; CHECK-NEXT: vle32.v v25, (a2) +; CHECK-NEXT: vle32.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse32.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <4 x i32>, <4 x i32>* %m_ptr + %mask = icmp eq <4 x i32> %m, zeroinitializer + %val = load <4 x i32>, <4 x i32>* %val_ptr + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %a, i32 8, <4 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>) + +define void @masked_store_v4i64(<4 x i64>* %val_ptr, <4 x i64>* %a, <4 x i64>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV32-NEXT: vle64.v v26, (a2) +; RV32-NEXT: vsetivli a2, 8, e32,m2,ta,mu +; RV32-NEXT: vmv.v.i v28, 0 +; RV32-NEXT: vsetivli a2, 4, e64,m2,ta,mu +; RV32-NEXT: vle64.v v30, (a0) +; RV32-NEXT: vmseq.vv v0, v26, v28 +; RV32-NEXT: vse64.v v30, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 4, e64,m2,ta,mu +; RV64-NEXT: vle64.v v26, (a2) +; RV64-NEXT: vle64.v v28, (a0) +; RV64-NEXT: vmseq.vi v0, v26, 0 +; RV64-NEXT: vse64.v v28, (a1), v0.t +; RV64-NEXT: ret + %m = load <4 x i64>, <4 x i64>* %m_ptr + %mask = icmp eq <4 x i64> %m, zeroinitializer + %val = load <4 x i64>, <4 x i64>* %val_ptr + call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %val, <4 x i64>* %a, i32 8, <4 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i1>) + +define void @masked_store_v8i8(<8 x i8>* %val_ptr, <8 x i8>* %a, <8 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a2) +; CHECK-NEXT: vle8.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse8.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <8 x i8>, <8 x i8>* %m_ptr + %mask = icmp eq <8 x i8> %m, zeroinitializer + %val = load <8 x i8>, <8 x i8>* %val_ptr + call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %val, <8 x i8>* %a, i32 8, <8 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>) + +define void @masked_store_v8i16(<8 x i16>* %val_ptr, <8 x i16>* %a, <8 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu +; CHECK-NEXT: vle16.v v25, (a2) +; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse16.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <8 x i16>, <8 x i16>* %m_ptr + %mask = icmp eq <8 x i16> %m, zeroinitializer + %val = load <8 x i16>, <8 x i16>* %val_ptr + call void 
@llvm.masked.store.v8i16.p0v8i16(<8 x i16> %val, <8 x i16>* %a, i32 8, <8 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>) + +define void @masked_store_v8i32(<8 x i32>* %val_ptr, <8 x i32>* %a, <8 x i32>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu +; CHECK-NEXT: vle32.v v26, (a2) +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vse32.v v28, (a1), v0.t +; CHECK-NEXT: ret + %m = load <8 x i32>, <8 x i32>* %m_ptr + %mask = icmp eq <8 x i32> %m, zeroinitializer + %val = load <8 x i32>, <8 x i32>* %val_ptr + call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %val, <8 x i32>* %a, i32 8, <8 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>) + +define void @masked_store_v8i64(<8 x i64>* %val_ptr, <8 x i64>* %a, <8 x i64>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV32-NEXT: vle64.v v28, (a2) +; RV32-NEXT: vsetivli a2, 16, e32,m4,ta,mu +; RV32-NEXT: vmv.v.i v8, 0 +; RV32-NEXT: vsetivli a2, 8, e64,m4,ta,mu +; RV32-NEXT: vle64.v v12, (a0) +; RV32-NEXT: vmseq.vv v0, v28, v8 +; RV32-NEXT: vse64.v v12, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 8, e64,m4,ta,mu +; RV64-NEXT: vle64.v v28, (a2) +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmseq.vi v0, v28, 0 +; RV64-NEXT: vse64.v v8, (a1), v0.t +; RV64-NEXT: ret + %m = load <8 x i64>, <8 x i64>* %m_ptr + %mask = icmp eq <8 x i64> %m, zeroinitializer + %val = load <8 x i64>, <8 x i64>* %val_ptr + call void @llvm.masked.store.v8i64.p0v8i64(<8 x i64> %val, <8 x i64>* %a, i32 8, <8 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v8i64.p0v8i64(<8 x i64>, <8 x i64>*, i32, <8 x i1>) + +define void @masked_store_v16i8(<16 x i8>* %val_ptr, <16 x i8>* %a, <16 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e8,m1,ta,mu +; CHECK-NEXT: vle8.v v25, (a2) +; CHECK-NEXT: vle8.v v26, (a0) +; CHECK-NEXT: vmseq.vi v0, v25, 0 +; CHECK-NEXT: vse8.v v26, (a1), v0.t +; CHECK-NEXT: ret + %m = load <16 x i8>, <16 x i8>* %m_ptr + %mask = icmp eq <16 x i8> %m, zeroinitializer + %val = load <16 x i8>, <16 x i8>* %val_ptr + call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %val, <16 x i8>* %a, i32 8, <16 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>) + +define void @masked_store_v16i16(<16 x i16>* %val_ptr, <16 x i16>* %a, <16 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e16,m2,ta,mu +; CHECK-NEXT: vle16.v v26, (a2) +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vse16.v v28, (a1), v0.t +; CHECK-NEXT: ret + %m = load <16 x i16>, <16 x i16>* %m_ptr + %mask = icmp eq <16 x i16> %m, zeroinitializer + %val = load <16 x i16>, <16 x i16>* %val_ptr + call void @llvm.masked.store.v16i16.p0v16i16(<16 x i16> %val, <16 x i16>* %a, i32 8, <16 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v16i16.p0v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>) + +define void @masked_store_v16i32(<16 x i32>* %val_ptr, <16 x i32>* %a, <16 x i32>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli a3, 16, e32,m4,ta,mu +; CHECK-NEXT: 
vle32.v v28, (a2) +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vse32.v v8, (a1), v0.t +; CHECK-NEXT: ret + %m = load <16 x i32>, <16 x i32>* %m_ptr + %mask = icmp eq <16 x i32> %m, zeroinitializer + %val = load <16 x i32>, <16 x i32>* %val_ptr + call void @llvm.masked.store.v16i32.p0v16i32(<16 x i32> %val, <16 x i32>* %a, i32 8, <16 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v16i32.p0v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>) + +define void @masked_store_v16i64(<16 x i64>* %val_ptr, <16 x i64>* %a, <16 x i64>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v16i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v8, (a2) +; RV32-NEXT: addi a2, zero, 32 +; RV32-NEXT: vsetvli a2, a2, e32,m8,ta,mu +; RV32-NEXT: vmv.v.i v16, 0 +; RV32-NEXT: vsetivli a2, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v24, (a0) +; RV32-NEXT: vmseq.vv v0, v8, v16 +; RV32-NEXT: vse64.v v24, (a1), v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v16i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV64-NEXT: vle64.v v8, (a2) +; RV64-NEXT: vle64.v v16, (a0) +; RV64-NEXT: vmseq.vi v0, v8, 0 +; RV64-NEXT: vse64.v v16, (a1), v0.t +; RV64-NEXT: ret + %m = load <16 x i64>, <16 x i64>* %m_ptr + %mask = icmp eq <16 x i64> %m, zeroinitializer + %val = load <16 x i64>, <16 x i64>* %val_ptr + call void @llvm.masked.store.v16i64.p0v16i64(<16 x i64> %val, <16 x i64>* %a, i32 8, <16 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v16i64.p0v16i64(<16 x i64>, <16 x i64>*, i32, <16 x i1>) + +define void @masked_store_v32i8(<32 x i8>* %val_ptr, <32 x i8>* %a, <32 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e8,m2,ta,mu +; CHECK-NEXT: vle8.v v26, (a2) +; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vmseq.vi v0, v26, 0 +; CHECK-NEXT: vse8.v v28, (a1), v0.t +; CHECK-NEXT: ret + %m = load <32 x i8>, <32 x i8>* %m_ptr + %mask = icmp eq <32 x i8> %m, zeroinitializer + %val = load <32 x i8>, <32 x i8>* %val_ptr + call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> %val, <32 x i8>* %a, i32 8, <32 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v32i8.p0v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>) + +define void @masked_store_v32i16(<32 x i16>* %val_ptr, <32 x i16>* %a, <32 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a2) +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vse16.v v8, (a1), v0.t +; CHECK-NEXT: ret + %m = load <32 x i16>, <32 x i16>* %m_ptr + %mask = icmp eq <32 x i16> %m, zeroinitializer + %val = load <32 x i16>, <32 x i16>* %val_ptr + call void @llvm.masked.store.v32i16.p0v32i16(<32 x i16> %val, <32 x i16>* %a, i32 8, <32 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v32i16.p0v32i16(<32 x i16>, <32 x i16>*, i32, <32 x i1>) + +define void @masked_store_v32i32(<32 x i32>* %val_ptr, <32 x i32>* %a, <32 x i32>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v32i32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a2) +; CHECK-NEXT: vle32.v v16, (a0) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vse32.v v16, (a1), v0.t +; CHECK-NEXT: ret + %m = load <32 x i32>, <32 x i32>* %m_ptr + %mask = icmp eq <32 x i32> %m, zeroinitializer + %val = 
load <32 x i32>, <32 x i32>* %val_ptr + call void @llvm.masked.store.v32i32.p0v32i32(<32 x i32> %val, <32 x i32>* %a, i32 8, <32 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v32i32.p0v32i32(<32 x i32>, <32 x i32>*, i32, <32 x i1>) + +define void @masked_store_v32i64(<32 x i64>* %val_ptr, <32 x i64>* %a, <32 x i64>* %m_ptr) nounwind { +; RV32-LABEL: masked_store_v32i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 4 +; RV32-NEXT: sub sp, sp, a3 +; RV32-NEXT: addi a3, a2, 128 +; RV32-NEXT: vsetivli a4, 16, e64,m8,ta,mu +; RV32-NEXT: vle64.v v8, (a3) +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vle64.v v16, (a2) +; RV32-NEXT: addi a2, zero, 32 +; RV32-NEXT: vsetvli a2, a2, e32,m8,ta,mu +; RV32-NEXT: vmv.v.i v24, 0 +; RV32-NEXT: vsetivli a2, 16, e64,m8,ta,mu +; RV32-NEXT: vmseq.vv v1, v16, v24 +; RV32-NEXT: addi a2, a0, 128 +; RV32-NEXT: vle64.v v16, (a2) +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmseq.vv v0, v8, v24 +; RV32-NEXT: addi a0, a1, 128 +; RV32-NEXT: vse64.v v16, (a0), v0.t +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vse64.v v8, (a1), v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: masked_store_v32i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli a3, 16, e64,m8,ta,mu +; RV64-NEXT: addi a3, a2, 128 +; RV64-NEXT: vle64.v v8, (a3) +; RV64-NEXT: vle64.v v16, (a2) +; RV64-NEXT: vmseq.vi v1, v8, 0 +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: addi a0, a0, 128 +; RV64-NEXT: vle64.v v24, (a0) +; RV64-NEXT: vmseq.vi v0, v16, 0 +; RV64-NEXT: vse64.v v8, (a1), v0.t +; RV64-NEXT: addi a0, a1, 128 +; RV64-NEXT: vmv1r.v v0, v1 +; RV64-NEXT: vse64.v v24, (a0), v0.t +; RV64-NEXT: ret + %m = load <32 x i64>, <32 x i64>* %m_ptr + %mask = icmp eq <32 x i64> %m, zeroinitializer + %val = load <32 x i64>, <32 x i64>* %val_ptr + call void @llvm.masked.store.v32i64.p0v32i64(<32 x i64> %val, <32 x i64>* %a, i32 8, <32 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v32i64.p0v32i64(<32 x i64>, <32 x i64>*, i32, <32 x i1>) + +define void @masked_store_v64i8(<64 x i8>* %val_ptr, <64 x i8>* %a, <64 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: vsetvli a3, a3, e8,m4,ta,mu +; CHECK-NEXT: vle8.v v28, (a2) +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vmseq.vi v0, v28, 0 +; CHECK-NEXT: vse8.v v8, (a1), v0.t +; CHECK-NEXT: ret + %m = load <64 x i8>, <64 x i8>* %m_ptr + %mask = icmp eq <64 x i8> %m, zeroinitializer + %val = load <64 x i8>, <64 x i8>* %val_ptr + call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %val, <64 x i8>* %a, i32 8, <64 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>) + +define void @masked_store_v64i16(<64 x i16>* %val_ptr, <64 x i16>* %a, <64 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v64i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; 
CHECK-NEXT: vsetvli a3, a3, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a2) +; CHECK-NEXT: vle16.v v16, (a0) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vse16.v v16, (a1), v0.t +; CHECK-NEXT: ret + %m = load <64 x i16>, <64 x i16>* %m_ptr + %mask = icmp eq <64 x i16> %m, zeroinitializer + %val = load <64 x i16>, <64 x i16>* %val_ptr + call void @llvm.masked.store.v64i16.p0v64i16(<64 x i16> %val, <64 x i16>* %a, i32 8, <64 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v64i16.p0v64i16(<64 x i16>, <64 x i16>*, i32, <64 x i1>) + +define void @masked_store_v64i32(<64 x i32>* %val_ptr, <64 x i32>* %a, <64 x i32>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v64i32: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 32 +; CHECK-NEXT: vsetvli a3, a3, e32,m8,ta,mu +; CHECK-NEXT: addi a3, a2, 128 +; CHECK-NEXT: vle32.v v8, (a3) +; CHECK-NEXT: vle32.v v16, (a2) +; CHECK-NEXT: vmseq.vi v1, v8, 0 +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: vse32.v v8, (a1), v0.t +; CHECK-NEXT: addi a0, a1, 128 +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vse32.v v24, (a0), v0.t +; CHECK-NEXT: ret + %m = load <64 x i32>, <64 x i32>* %m_ptr + %mask = icmp eq <64 x i32> %m, zeroinitializer + %val = load <64 x i32>, <64 x i32>* %val_ptr + call void @llvm.masked.store.v64i32.p0v64i32(<64 x i32> %val, <64 x i32>* %a, i32 8, <64 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v64i32.p0v64i32(<64 x i32>, <64 x i32>*, i32, <64 x i1>) + +define void @masked_store_v128i8(<128 x i8>* %val_ptr, <128 x i8>* %a, <128 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v128i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: vsetvli a3, a3, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a2) +; CHECK-NEXT: vle8.v v16, (a0) +; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: vse8.v v16, (a1), v0.t +; CHECK-NEXT: ret + %m = load <128 x i8>, <128 x i8>* %m_ptr + %mask = icmp eq <128 x i8> %m, zeroinitializer + %val = load <128 x i8>, <128 x i8>* %val_ptr + call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %val, <128 x i8>* %a, i32 8, <128 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32, <128 x i1>) + +define void @masked_store_v128i16(<128 x i16>* %val_ptr, <128 x i16>* %a, <128 x i16>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v128i16: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 64 +; CHECK-NEXT: vsetvli a3, a3, e16,m8,ta,mu +; CHECK-NEXT: addi a3, a2, 128 +; CHECK-NEXT: vle16.v v8, (a3) +; CHECK-NEXT: vle16.v v16, (a2) +; CHECK-NEXT: vmseq.vi v1, v8, 0 +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: vse16.v v8, (a1), v0.t +; CHECK-NEXT: addi a0, a1, 128 +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vse16.v v24, (a0), v0.t +; CHECK-NEXT: ret + %m = load <128 x i16>, <128 x i16>* %m_ptr + %mask = icmp eq <128 x i16> %m, zeroinitializer + %val = load <128 x i16>, <128 x i16>* %val_ptr + call void @llvm.masked.store.v128i16.p0v128i16(<128 x i16> %val, <128 x i16>* %a, i32 8, <128 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v128i16.p0v128i16(<128 x i16>, <128 x i16>*, i32, <128 x i1>) + +define void @masked_store_v256i8(<256 x i8>* %val_ptr, <256 x i8>* %a, <256 x i8>* %m_ptr) nounwind { +; CHECK-LABEL: masked_store_v256i8: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a3, zero, 128 +; CHECK-NEXT: vsetvli a3, a3, 
e8,m8,ta,mu +; CHECK-NEXT: addi a3, a2, 128 +; CHECK-NEXT: vle8.v v8, (a3) +; CHECK-NEXT: vle8.v v16, (a2) +; CHECK-NEXT: vmseq.vi v1, v8, 0 +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 128 +; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: vse8.v v8, (a1), v0.t +; CHECK-NEXT: addi a0, a1, 128 +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vse8.v v24, (a0), v0.t +; CHECK-NEXT: ret + %m = load <256 x i8>, <256 x i8>* %m_ptr + %mask = icmp eq <256 x i8> %m, zeroinitializer + %val = load <256 x i8>, <256 x i8>* %val_ptr + call void @llvm.masked.store.v256i8.p0v256i8(<256 x i8> %val, <256 x i8>* %a, i32 8, <256 x i1> %mask) + ret void +} +declare void @llvm.masked.store.v256i8.p0v256i8(<256 x i8>, <256 x i8>*, i32, <256 x i1>)
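
As an aside, a hypothetical C++ sketch (not part of the diff; the function name and types are invented for illustration) of the kind of guarded loop that ends up as the @llvm.masked.load/@llvm.masked.store calls exercised by the tests above, assuming masked loads and stores are reported as legal for fixed-length vectors (as with -mattr=+experimental-v and -riscv-v-vector-bits-min=128 in the RUN lines):

// Hypothetical sketch, not taken from the patch: a conditional update loop.
// A vectorizer may emit @llvm.masked.load/@llvm.masked.store for the guarded
// accesses below, which are then selected to vle*.v/vse*.v under a v0.t mask,
// as in the CHECK lines above.
void conditional_add(float *a, const float *b, int n) {
  for (int i = 0; i < n; ++i)
    if (b[i] != 0.0f)  // feeds the vector compare that produces the v0 mask
      a[i] += b[i];    // becomes a masked load and a masked store of a[i]
}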