Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -262,6 +262,8 @@
     setOperationAction(ISD::UMAX, VT, Legal);
     setOperationAction(ISD::ABS, VT, Legal);
     setOperationAction(ISD::SETCC, VT, Custom);
+    setOperationAction(ISD::MLOAD, VT, Custom);
+    setOperationAction(ISD::MSTORE, VT, Legal);
 
     // No native support for these.
     setOperationAction(ISD::UDIV, VT, Expand);
@@ -303,6 +305,8 @@
     setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
     setOperationAction(ISD::SETCC, VT, Custom);
+    setOperationAction(ISD::MLOAD, VT, Custom);
+    setOperationAction(ISD::MSTORE, VT, Legal);
 
     // Pre and Post inc are supported on loads and stores
     for (unsigned im = (unsigned)ISD::PRE_INC;
@@ -8786,6 +8790,31 @@
   Results.push_back(Upper);
 }
 
+static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
+  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
+  MVT VT = Op.getSimpleValueType();
+  SDValue Mask = N->getMask();
+  SDValue PassThru = N->getPassThru();
+  SDLoc dl(Op);
+
+  if (ISD::isBuildVectorAllZeros(PassThru.getNode()) ||
+      (PassThru->getOpcode() == ARMISD::VMOVIMM &&
+       isNullConstant(PassThru->getOperand(0))))
+    return Op;
+
+  // MVE Masked loads use zero as the passthru value. Here we convert undef to
+  // zero too, and other values are lowered to a select.
+  SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
+                                DAG.getTargetConstant(0, dl, MVT::i32));
+  SDValue NewLoad = DAG.getMaskedLoad(
+      VT, dl, N->getChain(), N->getBasePtr(), Mask, ZeroVec, N->getMemoryVT(),
+      N->getMemOperand(), N->getExtensionType(), N->isExpandingLoad());
+  SDValue Combo = NewLoad;
+  if (!PassThru.isUndef())
+    Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
+  return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
+}
+
 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
   if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
     // Acquire/Release load/store is not legal for targets without a dmb or
@@ -8985,6 +9014,8 @@
   case ISD::UADDO:
   case ISD::USUBO:
     return LowerUnsignedALUO(Op, DAG);
+  case ISD::MLOAD:
+    return LowerMLOAD(Op, DAG);
   case ISD::ATOMIC_LOAD:
   case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
   case ISD::FSINCOS:  return LowerFSINCOS(Op, DAG);
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -4806,6 +4806,10 @@
                                      PatFrag StoreKind, int shift>
   : Pat<(StoreKind (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr),
         (RegImmInst (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr)>;
+class MVE_vector_maskedstore_typed<ValueType Ty, Instruction RegImmInst,
+                                   PatFrag StoreKind, int shift>
+  : Pat<(StoreKind (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr, VCCR:$pred),
+        (RegImmInst (Ty MQPR:$val), t2addrmode_imm7<shift>:$addr, (i32 1), VCCR:$pred)>;
 
 multiclass MVE_vector_store {
@@ -4822,6 +4826,10 @@
                                     PatFrag LoadKind, int shift>
   : Pat<(Ty (LoadKind t2addrmode_imm7<shift>:$addr)),
         (Ty (RegImmInst t2addrmode_imm7<shift>:$addr))>;
+class MVE_vector_maskedload_typed<ValueType Ty, Instruction RegImmInst,
+                                  PatFrag LoadKind, int shift>
+  : Pat<(Ty (LoadKind t2addrmode_imm7<shift>:$addr, VCCR:$pred, (Ty NEONimmAllZerosV))),
+        (Ty (RegImmInst t2addrmode_imm7<shift>:$addr, (i32 1), VCCR:$pred))>;
 
 multiclass MVE_vector_load {
@@ -4867,6 +4875,28 @@
   return cast(N)->getAlignment() >= 2;
 }]>;
 
+def alignedmaskedload32 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
+                                  (masked_ld node:$ptr, node:$pred, node:$passthru), [{
+  return cast<MaskedLoadSDNode>(N)->getAlignment() >= 4;
+}]>;
+def alignedmaskedload16 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
+                                  (masked_ld node:$ptr, node:$pred, node:$passthru), [{
+  return cast<MaskedLoadSDNode>(N)->getAlignment() >= 2;
+}]>;
+def maskedload : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
+                         (masked_ld node:$ptr, node:$pred, node:$passthru)>;
+
+def alignedmaskedstore32 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
+                                   (masked_st node:$val, node:$ptr, node:$pred), [{
+  return cast<MaskedStoreSDNode>(N)->getAlignment() >= 4;
+}]>;
+def alignedmaskedstore16 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
+                                   (masked_st node:$val, node:$ptr, node:$pred), [{
+  return cast<MaskedStoreSDNode>(N)->getAlignment() >= 2;
+}]>;
+def maskedstore : PatFrag<(ops node:$val, node:$ptr, node:$pred),
+                          (masked_st node:$val, node:$ptr, node:$pred)>;
+
 let Predicates = [HasMVEInt, IsLE] in {
   // Stores
   defm : MVE_vector_store;
@@ -4885,6 +4915,26 @@
   defm : MVE_vector_offset_store;
   defm : MVE_vector_offset_store;
   defm : MVE_vector_offset_store;
+
+  // Unaligned masked stores (aligned are below)
+  def : Pat<(maskedstore (v4i32 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 MQPR:$val, t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+  def : Pat<(maskedstore (v4f32 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 MQPR:$val, t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+  def : Pat<(maskedstore (v8i16 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 MQPR:$val, t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+  def : Pat<(maskedstore (v8f16 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 MQPR:$val, t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+
+  // Unaligned masked loads
+  def : Pat<(v4i32 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v4i32 NEONimmAllZerosV))),
+            (v4i32 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred))>;
+  def : Pat<(v4f32 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v4f32 NEONimmAllZerosV))),
+            (v4f32 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred))>;
+  def : Pat<(v8i16 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v8i16 NEONimmAllZerosV))),
+            (v8i16 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred))>;
+  def : Pat<(v8f16 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v8f16 NEONimmAllZerosV))),
+            (v8f16 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred))>;
 }
 
 let Predicates = [HasMVEInt, IsBE] in {
@@ -4939,9 +4989,41 @@
   def : MVE_vector_offset_store_typed;
   def : MVE_vector_offset_store_typed;
   def : MVE_vector_offset_store_typed;
+
+  // Unaligned masked stores (aligned are below)
+  def : Pat<(maskedstore (v4i32 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 (MVE_VREV32_8 MQPR:$val), t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+  def : Pat<(maskedstore (v4f32 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 (MVE_VREV32_8 MQPR:$val), t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+  def : Pat<(maskedstore (v8i16 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 (MVE_VREV16_8 MQPR:$val), t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+  def : Pat<(maskedstore (v8f16 MQPR:$val), t2addrmode_imm7<0>:$addr, VCCR:$pred),
+            (MVE_VSTRBU8 (MVE_VREV16_8 MQPR:$val), t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)>;
+  // Unaligned masked loads
+  def : Pat<(v4i32 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v4i32 NEONimmAllZerosV))),
+            (v4i32 (MVE_VREV32_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)))>;
+  def : Pat<(v4f32 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v4f32 NEONimmAllZerosV))),
+            (v4f32 (MVE_VREV32_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)))>;
+  def : Pat<(v8i16 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v8i16 NEONimmAllZerosV))),
+            (v8i16 (MVE_VREV16_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)))>;
+  def : Pat<(v8f16 (maskedload t2addrmode_imm7<0>:$addr, VCCR:$pred, (v8f16 NEONimmAllZerosV))),
+            (v8f16 (MVE_VREV16_8 (MVE_VLDRBU8 t2addrmode_imm7<0>:$addr, (i32 1), VCCR:$pred)))>;
 }
 
 let Predicates = [HasMVEInt] in {
+  // Aligned masked store, shared between LE and BE
+  def : MVE_vector_maskedstore_typed<v16i8, MVE_VSTRBU8, maskedstore, 0>;
+  def : MVE_vector_maskedstore_typed<v8i16, MVE_VSTRHU16, alignedmaskedstore16, 1>;
+  def : MVE_vector_maskedstore_typed<v8f16, MVE_VSTRHU16, alignedmaskedstore16, 1>;
+  def : MVE_vector_maskedstore_typed<v4i32, MVE_VSTRWU32, alignedmaskedstore32, 2>;
+  def : MVE_vector_maskedstore_typed<v4f32, MVE_VSTRWU32, alignedmaskedstore32, 2>;
+  // Aligned masked loads
+  def : MVE_vector_maskedload_typed<v16i8, MVE_VLDRBU8, maskedload, 0>;
+  def : MVE_vector_maskedload_typed<v8i16, MVE_VLDRHU16, alignedmaskedload16, 1>;
+  def : MVE_vector_maskedload_typed<v8f16, MVE_VLDRHU16, alignedmaskedload16, 1>;
+  def : MVE_vector_maskedload_typed<v4i32, MVE_VLDRWU32, alignedmaskedload32, 2>;
+  def : MVE_vector_maskedload_typed<v4f32, MVE_VLDRWU32, alignedmaskedload32, 2>;
+
   // Predicate loads
   def : Pat<(v16i1 (load t2addrmode_imm7<2>:$addr)),
             (v16i1 (VLDR_P0_off t2addrmode_imm7<2>:$addr))>;
Index: llvm/lib/Target/ARM/ARMTargetTransformInfo.h
===================================================================
--- llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -106,6 +106,20 @@
     return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
   }
 
+  bool isLegalMaskedLoad(Type *DataTy) {
+    if (!ST->hasMVEIntegerOps())
+      return false;
+
+    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
+    if (VecWidth != 128)
+      return false;
+
+    unsigned EltWidth = DataTy->getScalarSizeInBits();
+    return EltWidth == 32 || EltWidth == 16 || EltWidth == 8;
+  }
+
+  bool isLegalMaskedStore(Type *DataTy) { return isLegalMaskedLoad(DataTy); }
+
   /// \name Scalar TTI Implementations
   /// @{
Index: llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
+; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
+
+define void @foo_v4i32_v4i32(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
+; CHECK-LABEL: foo_v4i32_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    vpstt
+; CHECK-NEXT:    vldrwt.u32 q0, [r2]
+; CHECK-NEXT:    vstrwt.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
+  call void @llvm.masked.store.v4i32(<4 x i32> %2, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_sext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) {
+; CHECK-LABEL: foo_sext_v4i32_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    mov r3, sp
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+;
CHECK-NEXT: ldrbne r3, [r2] +; CHECK-NEXT: vmovne.32 q0[0], r3 +; CHECK-NEXT: lsls r3, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #1] +; CHECK-NEXT: vmovmi.32 q0[1], r3 +; CHECK-NEXT: lsls r3, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #2] +; CHECK-NEXT: vmovmi.32 q0[2], r3 +; CHECK-NEXT: lsls r1, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r1, [r2, #3] +; CHECK-NEXT: vmovmi.32 q0[3], r1 +; CHECK-NEXT: vmovlb.s8 q0, q0 +; CHECK-NEXT: vmovlb.s16 q0, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrwt.32 q0, [r0] +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %mask, align 4 + %1 = icmp sgt <4 x i32> %0, zeroinitializer + %2 = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> undef) + %3 = sext <4 x i8> %2 to <4 x i32> + call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1) + ret void +} + +define void @foo_sext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) { +; CHECK-LABEL: foo_sext_v4i32_v4i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vldrw.u32 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vcmp.s32 gt, q0, zr +; CHECK-NEXT: @ implicit-def: $q0 +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r3, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: ldrhne r3, [r2] +; CHECK-NEXT: vmovne.32 q0[0], r3 +; CHECK-NEXT: lsls r3, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrhmi r3, [r2, #2] +; CHECK-NEXT: vmovmi.32 q0[1], r3 +; CHECK-NEXT: lsls r3, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrhmi r3, [r2, #4] +; CHECK-NEXT: vmovmi.32 q0[2], r3 +; CHECK-NEXT: lsls r1, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrhmi r1, [r2, #6] +; CHECK-NEXT: vmovmi.32 q0[3], r1 +; CHECK-NEXT: vmovlb.s16 q0, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrwt.32 q0, [r0] +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %mask, align 4 + %1 = icmp sgt <4 x i32> %0, zeroinitializer + %2 = call <4 x i16> @llvm.masked.load.v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef) + %3 = sext <4 x i16> %2 to <4 x i32> + call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1) + ret void +} + +define void @foo_zext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) { +; CHECK-LABEL: foo_zext_v4i32_v4i8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vldrw.u32 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vmov.i32 q1, #0xff +; CHECK-NEXT: vcmp.s32 gt, q0, zr +; CHECK-NEXT: @ implicit-def: $q0 +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r3, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: ldrbne r3, [r2] +; CHECK-NEXT: vmovne.32 q0[0], r3 +; CHECK-NEXT: lsls r3, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #1] +; CHECK-NEXT: vmovmi.32 q0[1], r3 +; CHECK-NEXT: lsls r3, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #2] +; CHECK-NEXT: vmovmi.32 q0[2], r3 +; CHECK-NEXT: lsls r1, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r1, [r2, #3] +; CHECK-NEXT: vmovmi.32 q0[3], r1 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrwt.32 q0, [r0] +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %mask, align 4 + %1 = icmp sgt <4 x i32> %0, zeroinitializer + %2 = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> 
undef) + %3 = zext <4 x i8> %2 to <4 x i32> + call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1) + ret void +} + +define void @foo_zext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) { +; CHECK-LABEL: foo_zext_v4i32_v4i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vldrw.u32 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vcmp.s32 gt, q0, zr +; CHECK-NEXT: @ implicit-def: $q0 +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r3, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: ldrhne r3, [r2] +; CHECK-NEXT: vmovne.32 q0[0], r3 +; CHECK-NEXT: lsls r3, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrhmi r3, [r2, #2] +; CHECK-NEXT: vmovmi.32 q0[1], r3 +; CHECK-NEXT: lsls r3, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrhmi r3, [r2, #4] +; CHECK-NEXT: vmovmi.32 q0[2], r3 +; CHECK-NEXT: lsls r1, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrhmi r1, [r2, #6] +; CHECK-NEXT: vmovmi.32 q0[3], r1 +; CHECK-NEXT: vmovlb.u16 q0, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrwt.32 q0, [r0] +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %mask, align 4 + %1 = icmp sgt <4 x i32> %0, zeroinitializer + %2 = call <4 x i16> @llvm.masked.load.v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef) + %3 = zext <4 x i16> %2 to <4 x i32> + call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1) + ret void +} + +define void @foo_v8i16_v8i16(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i16> *%src) { +; CHECK-LABEL: foo_v8i16_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrh.u16 q0, [r1] +; CHECK-NEXT: vcmp.s16 gt, q0, zr +; CHECK-NEXT: vpstt +; CHECK-NEXT: vldrht.u16 q0, [r2] +; CHECK-NEXT: vstrht.16 q0, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load <8 x i16>, <8 x i16>* %mask, align 2 + %1 = icmp sgt <8 x i16> %0, zeroinitializer + %2 = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef) + call void @llvm.masked.store.v8i16(<8 x i16> %2, <8 x i16>* %dest, i32 2, <8 x i1> %1) + ret void +} + +define void @foo_sext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) { +; CHECK-LABEL: foo_sext_v8i16_v8i8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #8 +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: vldrh.u16 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vcmp.s16 gt, q0, zr +; CHECK-NEXT: @ implicit-def: $q0 +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r3, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: ldrbne r3, [r2] +; CHECK-NEXT: vmovne.16 q0[0], r3 +; CHECK-NEXT: lsls r3, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #1] +; CHECK-NEXT: vmovmi.16 q0[1], r3 +; CHECK-NEXT: lsls r3, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #2] +; CHECK-NEXT: vmovmi.16 q0[2], r3 +; CHECK-NEXT: lsls r3, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #3] +; CHECK-NEXT: vmovmi.16 q0[3], r3 +; CHECK-NEXT: lsls r3, r1, #27 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #4] +; CHECK-NEXT: vmovmi.16 q0[4], r3 +; CHECK-NEXT: lsls r3, r1, #26 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #5] +; CHECK-NEXT: vmovmi.16 q0[5], r3 +; CHECK-NEXT: lsls r3, r1, #25 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #6] +; CHECK-NEXT: vmovmi.16 q0[6], r3 +; CHECK-NEXT: lsls r1, r1, #24 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r1, [r2, #7] +; CHECK-NEXT: vmovmi.16 q0[7], r1 +; CHECK-NEXT: vmovlb.s8 
q0, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrht.16 q0, [r0] +; CHECK-NEXT: add sp, #8 +; CHECK-NEXT: bx lr +entry: + %0 = load <8 x i16>, <8 x i16>* %mask, align 2 + %1 = icmp sgt <8 x i16> %0, zeroinitializer + %2 = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef) + %3 = sext <8 x i8> %2 to <8 x i16> + call void @llvm.masked.store.v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1) + ret void +} + +define void @foo_zext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) { +; CHECK-LABEL: foo_zext_v8i16_v8i8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #8 +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: vldrh.u16 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vcmp.s16 gt, q0, zr +; CHECK-NEXT: @ implicit-def: $q0 +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r3, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: ldrbne r3, [r2] +; CHECK-NEXT: vmovne.16 q0[0], r3 +; CHECK-NEXT: lsls r3, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #1] +; CHECK-NEXT: vmovmi.16 q0[1], r3 +; CHECK-NEXT: lsls r3, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #2] +; CHECK-NEXT: vmovmi.16 q0[2], r3 +; CHECK-NEXT: lsls r3, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #3] +; CHECK-NEXT: vmovmi.16 q0[3], r3 +; CHECK-NEXT: lsls r3, r1, #27 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #4] +; CHECK-NEXT: vmovmi.16 q0[4], r3 +; CHECK-NEXT: lsls r3, r1, #26 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #5] +; CHECK-NEXT: vmovmi.16 q0[5], r3 +; CHECK-NEXT: lsls r3, r1, #25 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r3, [r2, #6] +; CHECK-NEXT: vmovmi.16 q0[6], r3 +; CHECK-NEXT: lsls r1, r1, #24 +; CHECK-NEXT: itt mi +; CHECK-NEXT: ldrbmi r1, [r2, #7] +; CHECK-NEXT: vmovmi.16 q0[7], r1 +; CHECK-NEXT: vmovlb.u8 q0, q0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vstrht.16 q0, [r0] +; CHECK-NEXT: add sp, #8 +; CHECK-NEXT: bx lr +entry: + %0 = load <8 x i16>, <8 x i16>* %mask, align 2 + %1 = icmp sgt <8 x i16> %0, zeroinitializer + %2 = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef) + %3 = zext <8 x i8> %2 to <8 x i16> + call void @llvm.masked.store.v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1) + ret void +} + +define void @foo_v16i8_v16i8(<16 x i8> *%dest, <16 x i8> *%mask, <16 x i8> *%src) { +; CHECK-LABEL: foo_v16i8_v16i8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrb.u8 q0, [r1] +; CHECK-NEXT: vcmp.s8 gt, q0, zr +; CHECK-NEXT: vpstt +; CHECK-NEXT: vldrbt.u8 q0, [r2] +; CHECK-NEXT: vstrbt.8 q0, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load <16 x i8>, <16 x i8>* %mask, align 1 + %1 = icmp sgt <16 x i8> %0, zeroinitializer + %2 = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %src, i32 1, <16 x i1> %1, <16 x i8> undef) + call void @llvm.masked.store.v16i8(<16 x i8> %2, <16 x i8>* %dest, i32 1, <16 x i1> %1) + ret void +} + +define void @foo_trunc_v8i8_v8i16(<8 x i8> *%dest, <8 x i16> *%mask, <8 x i16> *%src) { +; CHECK-LABEL: foo_trunc_v8i8_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #8 +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: vldrh.u16 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vcmp.s16 gt, q0, zr +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: vpst +; CHECK-NEXT: vldrht.u16 q0, [r2] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r2, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: vmovne.u16 r2, q0[0] +; CHECK-NEXT: strbne r2, [r0] +; CHECK-NEXT: lsls r2, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi.u16 r2, 
q0[1] +; CHECK-NEXT: strbmi r2, [r0, #1] +; CHECK-NEXT: lsls r2, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi.u16 r2, q0[2] +; CHECK-NEXT: strbmi r2, [r0, #2] +; CHECK-NEXT: lsls r2, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi.u16 r2, q0[3] +; CHECK-NEXT: strbmi r2, [r0, #3] +; CHECK-NEXT: lsls r2, r1, #27 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi.u16 r2, q0[4] +; CHECK-NEXT: strbmi r2, [r0, #4] +; CHECK-NEXT: lsls r2, r1, #26 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi.u16 r2, q0[5] +; CHECK-NEXT: strbmi r2, [r0, #5] +; CHECK-NEXT: lsls r2, r1, #25 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi.u16 r2, q0[6] +; CHECK-NEXT: strbmi r2, [r0, #6] +; CHECK-NEXT: lsls r1, r1, #24 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi.u16 r1, q0[7] +; CHECK-NEXT: strbmi r1, [r0, #7] +; CHECK-NEXT: add sp, #8 +; CHECK-NEXT: bx lr +entry: + %0 = load <8 x i16>, <8 x i16>* %mask, align 2 + %1 = icmp sgt <8 x i16> %0, zeroinitializer + %2 = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef) + %3 = trunc <8 x i16> %2 to <8 x i8> + call void @llvm.masked.store.v8i8(<8 x i8> %3, <8 x i8>* %dest, i32 1, <8 x i1> %1) + ret void +} + +define void @foo_trunc_v4i8_v4i32(<4 x i8> *%dest, <4 x i32> *%mask, <4 x i32> *%src) { +; CHECK-LABEL: foo_trunc_v4i8_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vldrw.u32 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vcmp.s32 gt, q0, zr +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: vpst +; CHECK-NEXT: vldrwt.u32 q0, [r2] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r2, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: vmovne r2, s0 +; CHECK-NEXT: strbne r2, [r0] +; CHECK-NEXT: lsls r2, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi r2, s1 +; CHECK-NEXT: strbmi r2, [r0, #1] +; CHECK-NEXT: lsls r2, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi r2, s2 +; CHECK-NEXT: strbmi r2, [r0, #2] +; CHECK-NEXT: lsls r1, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi r1, s3 +; CHECK-NEXT: strbmi r1, [r0, #3] +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %mask, align 4 + %1 = icmp sgt <4 x i32> %0, zeroinitializer + %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef) + %3 = trunc <4 x i32> %2 to <4 x i8> + call void @llvm.masked.store.v4i8(<4 x i8> %3, <4 x i8>* %dest, i32 1, <4 x i1> %1) + ret void +} + +define void @foo_trunc_v4i16_v4i32(<4 x i16> *%dest, <4 x i32> *%mask, <4 x i32> *%src) { +; CHECK-LABEL: foo_trunc_v4i16_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: vldrw.u32 q0, [r1] +; CHECK-NEXT: mov r3, sp +; CHECK-NEXT: vcmp.s32 gt, q0, zr +; CHECK-NEXT: vstr p0, [r3] +; CHECK-NEXT: vpst +; CHECK-NEXT: vldrwt.u32 q0, [r2] +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: lsls r2, r1, #31 +; CHECK-NEXT: itt ne +; CHECK-NEXT: vmovne r2, s0 +; CHECK-NEXT: strhne r2, [r0] +; CHECK-NEXT: lsls r2, r1, #30 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi r2, s1 +; CHECK-NEXT: strhmi r2, [r0, #2] +; CHECK-NEXT: lsls r2, r1, #29 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi r2, s2 +; CHECK-NEXT: strhmi r2, [r0, #4] +; CHECK-NEXT: lsls r1, r1, #28 +; CHECK-NEXT: itt mi +; CHECK-NEXT: vmovmi r1, s3 +; CHECK-NEXT: strhmi r1, [r0, #6] +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %mask, align 4 + %1 = icmp sgt <4 x i32> %0, zeroinitializer + %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 
4, <4 x i1> %1, <4 x i32> undef) + %3 = trunc <4 x i32> %2 to <4 x i16> + call void @llvm.masked.store.v4i16(<4 x i16> %3, <4 x i16>* %dest, i32 2, <4 x i1> %1) + ret void +} + +define void @foo_v4f32_v4f32(<4 x float> *%dest, <4 x i32> *%mask, <4 x float> *%src) { +; CHECK-LABEL: foo_v4f32_v4f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrw.u32 q0, [r1] +; CHECK-NEXT: vcmp.s32 gt, q0, zr +; CHECK-NEXT: vpstt +; CHECK-NEXT: vldrwt.u32 q0, [r2] +; CHECK-NEXT: vstrwt.32 q0, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %mask, align 4 + %1 = icmp sgt <4 x i32> %0, zeroinitializer + %2 = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %src, i32 4, <4 x i1> %1, <4 x float> undef) + call void @llvm.masked.store.v4f32(<4 x float> %2, <4 x float>* %dest, i32 4, <4 x i1> %1) + ret void +} + +define void @foo_v8f16_v8f16(<8 x half> *%dest, <8 x i16> *%mask, <8 x half> *%src) { +; CHECK-LABEL: foo_v8f16_v8f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrh.u16 q0, [r1] +; CHECK-NEXT: vcmp.s16 gt, q0, zr +; CHECK-NEXT: vpstt +; CHECK-NEXT: vldrht.u16 q0, [r2] +; CHECK-NEXT: vstrht.16 q0, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load <8 x i16>, <8 x i16>* %mask, align 2 + %1 = icmp sgt <8 x i16> %0, zeroinitializer + %2 = call <8 x half> @llvm.masked.load.v8f16(<8 x half>* %src, i32 2, <8 x i1> %1, <8 x half> undef) + call void @llvm.masked.store.v8f16(<8 x half> %2, <8 x half>* %dest, i32 2, <8 x i1> %1) + ret void +} + +declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>) +declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>) +declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>) +declare void @llvm.masked.store.v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>) +declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>) +declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>) +declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>) +declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>) +declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>) +declare <8 x half> @llvm.masked.load.v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>) + +declare void @llvm.masked.store.v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>) +declare void @llvm.masked.store.v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>) +declare void @llvm.masked.store.v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>) +declare <4 x i16> @llvm.masked.load.v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>) +declare <4 x i8> @llvm.masked.load.v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>) +declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>) Index: llvm/test/CodeGen/Thumb2/mve-masked-load.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/Thumb2/mve-masked-load.ll @@ -0,0 +1,952 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE +; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE + +define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_zero(<4 x i32> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_align4_zero: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; 
CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_align4_zero: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q1, [r0] +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> zeroinitializer) + ret <4 x i32> %l +} + +define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_undef(<4 x i32> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_align4_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_align4_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q1, [r0] +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> undef) + ret <4 x i32> %l +} + +define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align1_undef(<4 x i32> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_align1_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_align1_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-BE-NEXT: vrev32.8 q1, q0 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 1, <4 x i1> %c, <4 x i32> undef) + ret <4 x i32> %l +} + +define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_other(<4 x i32> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_align4_other: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q1, [r0] +; CHECK-LE-NEXT: vpsel q0, q1, q0 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_align4_other: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q1 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> %a) + ret <4 x i32> %l +} + +define arm_aapcs_vfpcc i8* @masked_v4i32_preinc(i8* %x, i8* %y, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_preinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0, #4] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_preinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q0, [r0, #4] +; CHECK-BE-NEXT: vstrw.32 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; 
CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %z to <4 x i32>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef) + %2 = bitcast i8* %y to <4 x i32>* + store <4 x i32> %1, <4 x i32>* %2, align 4 + ret i8* %z +} + +define arm_aapcs_vfpcc i8* @masked_v4i32_postinc(i8* %x, i8* %y, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_postinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_postinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-BE-NEXT: vstrw.32 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %x to <4 x i32>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef) + %2 = bitcast i8* %y to <4 x i32>* + store <4 x i32> %1, <4 x i32>* %2, align 4 + ret i8* %z +} + + + +define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_zero(<8 x i16> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_align4_zero: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vmov.i32 q1, #0x0 +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0] +; CHECK-LE-NEXT: vpsel q0, q0, q1 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_align4_zero: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vmov.i32 q1, #0x0 +; CHECK-BE-NEXT: vrev64.16 q2, q0 +; CHECK-BE-NEXT: vrev32.16 q1, q1 +; CHECK-BE-NEXT: vcmp.s16 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q1 +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> zeroinitializer) + ret <8 x i16> %l +} + +define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_undef(<8 x i16> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_align4_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_align4_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q1, [r0] +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> undef) + ret <8 x i16> %l +} + +define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_align1_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_align1_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0] 
+; CHECK-BE-NEXT: vrev16.8 q1, q0 +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 1, <8 x i1> %c, <8 x i16> undef) + ret <8 x i16> %l +} + +define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_other(<8 x i16> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_align4_other: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q1, [r0] +; CHECK-LE-NEXT: vpsel q0, q1, q0 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_align4_other: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q1 +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> %a) + ret <8 x i16> %l +} + +define i8* @masked_v8i16_preinc(i8* %x, i8* %y, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_preinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0, #4] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_preinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0, #4] +; CHECK-BE-NEXT: vstrh.16 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %z to <8 x i16>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef) + %2 = bitcast i8* %y to <8 x i16>* + store <8 x i16> %1, <8 x i16>* %2, align 4 + ret i8* %z +} + +define arm_aapcs_vfpcc i8* @masked_v8i16_postinc(i8* %x, i8* %y, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_postinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_postinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0] +; CHECK-BE-NEXT: vstrh.16 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %x to <8 x i16>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef) + %2 = bitcast i8* %y to <8 x i16>* + store <8 x i16> %1, <8 x i16>* %2, align 4 + ret i8* %z +} + + +define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_zero(<16 x i8> *%dest, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8_align4_zero: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vmov.i32 q1, #0x0 +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-LE-NEXT: vpsel q0, q0, q1 +; CHECK-LE-NEXT: bx 
lr +; +; CHECK-BE-LABEL: masked_v16i8_align4_zero: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vmov.i32 q1, #0x0 +; CHECK-BE-NEXT: vrev64.8 q2, q0 +; CHECK-BE-NEXT: vrev32.8 q1, q1 +; CHECK-BE-NEXT: vcmp.s8 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q1 +; CHECK-BE-NEXT: vrev64.8 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <16 x i8> %a, zeroinitializer + %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> zeroinitializer) + ret <16 x i8> %l +} + +define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_undef(<16 x i8> *%dest, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8_align4_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v16i8_align4_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.8 q1, q0 +; CHECK-BE-NEXT: vcmp.s8 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q1, [r0] +; CHECK-BE-NEXT: vrev64.8 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <16 x i8> %a, zeroinitializer + %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> undef) + ret <16 x i8> %l +} + +define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_other(<16 x i8> *%dest, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8_align4_other: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q1, [r0] +; CHECK-LE-NEXT: vpsel q0, q1, q0 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v16i8_align4_other: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.8 q1, q0 +; CHECK-BE-NEXT: vcmp.s8 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q1 +; CHECK-BE-NEXT: vrev64.8 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <16 x i8> %a, zeroinitializer + %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> %a) + ret <16 x i8> %l +} + +define arm_aapcs_vfpcc i8* @masked_v16i8_preinc(i8* %x, i8* %y, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8_preinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0, #4] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v16i8_preinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.8 q1, q0 +; CHECK-BE-NEXT: vcmp.s8 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0, #4] +; CHECK-BE-NEXT: vstrb.8 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %z to <16 x i8>* + %c = icmp sgt <16 x i8> %a, zeroinitializer + %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef) + %2 = bitcast i8* %y to <16 x i8>* + store <16 x i8> %1, <16 x i8>* %2, align 4 + ret i8* %z +} + +define arm_aapcs_vfpcc i8* @masked_v16i8_postinc(i8* %x, i8* %y, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8_postinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v16i8_postinc: +; CHECK-BE: @ %bb.0: @ %entry +; 
CHECK-BE-NEXT: vrev64.8 q1, q0 +; CHECK-BE-NEXT: vcmp.s8 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-BE-NEXT: vstrb.8 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %x to <16 x i8>* + %c = icmp sgt <16 x i8> %a, zeroinitializer + %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef) + %2 = bitcast i8* %y to <16 x i8>* + store <16 x i8> %1, <16 x i8>* %2, align 4 + ret i8* %z +} + + +define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_zero(<4 x float> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4f32_align4_zero: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vmov.i32 q1, #0x0 +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-LE-NEXT: vpsel q0, q0, q1 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_align4_zero: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vmov.i32 q1, #0x0 +; CHECK-BE-NEXT: vcmp.s32 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q1 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> zeroinitializer) + ret <4 x float> %l +} + +define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_undef(<4 x float> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4f32_align4_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_align4_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q1, [r0] +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> undef) + ret <4 x float> %l +} + +define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align1_undef(<4 x float> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4f32_align1_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_align1_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-BE-NEXT: vrev32.8 q1, q0 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 1, <4 x i1> %c, <4 x float> undef) + ret <4 x float> %l +} + +define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_other(<4 x float> *%dest, <4 x i32> %a, <4 x float> %b) { +; CHECK-LE-LABEL: masked_v4f32_align4_other: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-LE-NEXT: vpsel q0, q0, q1 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_align4_other: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q2, q1 +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; 
CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q2 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> %b) + ret <4 x float> %l +} + +define arm_aapcs_vfpcc i8* @masked_v4f32_preinc(i8* %x, i8* %y, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4f32_preinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0, #4] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_preinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q0, [r0, #4] +; CHECK-BE-NEXT: vstrw.32 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %z to <4 x float>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef) + %2 = bitcast i8* %y to <4 x float>* + store <4 x float> %1, <4 x float>* %2, align 4 + ret i8* %z +} + +define arm_aapcs_vfpcc i8* @masked_v4f32_postinc(i8* %x, i8* %y, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4f32_postinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_postinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrwt.u32 q0, [r0] +; CHECK-BE-NEXT: vstrw.32 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %x to <4 x float>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef) + %2 = bitcast i8* %y to <4 x float>* + store <4 x float> %1, <4 x float>* %2, align 4 + ret i8* %z +} + + +define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_zero(<8 x half> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8f16_align4_zero: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vmov.i32 q1, #0x0 +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0] +; CHECK-LE-NEXT: vpsel q0, q0, q1 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_align4_zero: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vmov.i32 q1, #0x0 +; CHECK-BE-NEXT: vrev64.16 q2, q0 +; CHECK-BE-NEXT: vrev32.16 q1, q1 +; CHECK-BE-NEXT: vcmp.s16 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q1 +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> zeroinitializer) + ret <8 x half> %l +} + +define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_undef(<8 x half> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8f16_align4_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; 
CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_align4_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q1, [r0] +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> undef) + ret <8 x half> %l +} + +define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8f16_align1_undef: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_align1_undef: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrbt.u8 q0, [r0] +; CHECK-BE-NEXT: vrev16.8 q1, q0 +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 1, <8 x i1> %c, <8 x half> undef) + ret <8 x half> %l +} + +define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_other(<8 x half> *%dest, <8 x i16> %a, <8 x half> %b) { +; CHECK-LE-LABEL: masked_v8f16_align4_other: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0] +; CHECK-LE-NEXT: vpsel q0, q0, q1 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_align4_other: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q2, q1 +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0] +; CHECK-BE-NEXT: vpsel q1, q0, q2 +; CHECK-BE-NEXT: vrev64.16 q0, q1 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> %b) + ret <8 x half> %l +} + +define arm_aapcs_vfpcc i8* @masked_v8f16_preinc(i8* %x, i8* %y, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8f16_preinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0, #4] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_preinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0, #4] +; CHECK-BE-NEXT: vstrh.16 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %z to <8 x half>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef) + %2 = bitcast i8* %y to <8 x half>* + store <8 x half> %1, <8 x half>* %2, align 4 + ret i8* %z +} + +define arm_aapcs_vfpcc i8* @masked_v8f16_postinc(i8* %x, i8* %y, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8f16_postinc: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vldrht.u16 q0, [r0] +; CHECK-LE-NEXT: vstrw.32 q0, [r1] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx 
lr +; +; CHECK-BE-LABEL: masked_v8f16_postinc: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vldrht.u16 q0, [r0] +; CHECK-BE-NEXT: vstrh.16 q0, [r1] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %x, i32 4 + %0 = bitcast i8* %x to <8 x half>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef) + %2 = bitcast i8* %y to <8 x half>* + store <8 x half> %1, <8 x half>* %2, align 4 + ret i8* %z +} + + +define arm_aapcs_vfpcc <2 x i64> @masked_v2i64_align4_zero(<2 x i64> *%dest, <2 x i64> %a) { +; CHECK-LE-LABEL: masked_v2i64_align4_zero: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: .pad #4 +; CHECK-LE-NEXT: sub sp, #4 +; CHECK-LE-NEXT: vmov r3, s0 +; CHECK-LE-NEXT: movs r2, #0 +; CHECK-LE-NEXT: vmov r1, s1 +; CHECK-LE-NEXT: vmov r12, s3 +; CHECK-LE-NEXT: rsbs r3, r3, #0 +; CHECK-LE-NEXT: vmov r3, s2 +; CHECK-LE-NEXT: sbcs.w r1, r2, r1 +; CHECK-LE-NEXT: mov.w r1, #0 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r1, #1 +; CHECK-LE-NEXT: rsbs r3, r3, #0 +; CHECK-LE-NEXT: sbcs.w r3, r2, r12 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r2, #1 +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: it ne +; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: bfi r2, r1, #0, #1 +; CHECK-LE-NEXT: and r1, r2, #3 +; CHECK-LE-NEXT: lsls r2, r2, #31 +; CHECK-LE-NEXT: beq .LBB29_2 +; CHECK-LE-NEXT: @ %bb.1: @ %cond.load +; CHECK-LE-NEXT: vldr d1, .LCPI29_0 +; CHECK-LE-NEXT: vldr d0, [r0] +; CHECK-LE-NEXT: b .LBB29_3 +; CHECK-LE-NEXT: .LBB29_2: +; CHECK-LE-NEXT: vmov.i32 q0, #0x0 +; CHECK-LE-NEXT: .LBB29_3: @ %else +; CHECK-LE-NEXT: lsls r1, r1, #30 +; CHECK-LE-NEXT: it mi +; CHECK-LE-NEXT: vldrmi d1, [r0, #8] +; CHECK-LE-NEXT: add sp, #4 +; CHECK-LE-NEXT: bx lr +; CHECK-LE-NEXT: .p2align 3 +; CHECK-LE-NEXT: @ %bb.4: +; CHECK-LE-NEXT: .LCPI29_0: +; CHECK-LE-NEXT: .long 0 @ double 0 +; CHECK-LE-NEXT: .long 0 +; +; CHECK-BE-LABEL: masked_v2i64_align4_zero: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: .pad #4 +; CHECK-BE-NEXT: sub sp, #4 +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: movs r2, #0 +; CHECK-BE-NEXT: vmov r3, s7 +; CHECK-BE-NEXT: vmov r1, s6 +; CHECK-BE-NEXT: vmov r12, s4 +; CHECK-BE-NEXT: rsbs r3, r3, #0 +; CHECK-BE-NEXT: vmov r3, s5 +; CHECK-BE-NEXT: sbcs.w r1, r2, r1 +; CHECK-BE-NEXT: mov.w r1, #0 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r1, #1 +; CHECK-BE-NEXT: rsbs r3, r3, #0 +; CHECK-BE-NEXT: sbcs.w r3, r2, r12 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r2, #1 +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: it ne +; CHECK-BE-NEXT: mvnne r2, #1 +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #31 +; CHECK-BE-NEXT: beq .LBB29_2 +; CHECK-BE-NEXT: @ %bb.1: @ %cond.load +; CHECK-BE-NEXT: vldr d1, .LCPI29_0 +; CHECK-BE-NEXT: vldr d0, [r0] +; CHECK-BE-NEXT: b .LBB29_3 +; CHECK-BE-NEXT: .LBB29_2: +; CHECK-BE-NEXT: vmov.i32 q1, #0x0 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: .LBB29_3: @ %else +; CHECK-BE-NEXT: lsls r1, r1, #30 +; CHECK-BE-NEXT: it mi +; CHECK-BE-NEXT: vldrmi d1, [r0, #8] +; CHECK-BE-NEXT: add sp, #4 +; CHECK-BE-NEXT: bx lr +; CHECK-BE-NEXT: .p2align 3 +; CHECK-BE-NEXT: @ %bb.4: +; CHECK-BE-NEXT: .LCPI29_0: +; CHECK-BE-NEXT: .long 0 @ double 0 +; CHECK-BE-NEXT: .long 0 +entry: + %c = icmp sgt <2 x i64> %a, zeroinitializer + %l = call <2 x i64> 
@llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %dest, i32 8, <2 x i1> %c, <2 x i64> zeroinitializer) + ret <2 x i64> %l +} + +define arm_aapcs_vfpcc <2 x double> @masked_v2f64_align4_zero(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) { +; CHECK-LE-LABEL: masked_v2f64_align4_zero: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: .pad #4 +; CHECK-LE-NEXT: sub sp, #4 +; CHECK-LE-NEXT: vmov r3, s4 +; CHECK-LE-NEXT: movs r2, #0 +; CHECK-LE-NEXT: vmov r1, s5 +; CHECK-LE-NEXT: vmov r12, s7 +; CHECK-LE-NEXT: rsbs r3, r3, #0 +; CHECK-LE-NEXT: vmov r3, s6 +; CHECK-LE-NEXT: sbcs.w r1, r2, r1 +; CHECK-LE-NEXT: mov.w r1, #0 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r1, #1 +; CHECK-LE-NEXT: rsbs r3, r3, #0 +; CHECK-LE-NEXT: sbcs.w r3, r2, r12 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r2, #1 +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: it ne +; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: bfi r2, r1, #0, #1 +; CHECK-LE-NEXT: and r1, r2, #3 +; CHECK-LE-NEXT: lsls r2, r2, #31 +; CHECK-LE-NEXT: beq .LBB30_2 +; CHECK-LE-NEXT: @ %bb.1: @ %cond.load +; CHECK-LE-NEXT: vldr d1, .LCPI30_0 +; CHECK-LE-NEXT: vldr d0, [r0] +; CHECK-LE-NEXT: b .LBB30_3 +; CHECK-LE-NEXT: .LBB30_2: +; CHECK-LE-NEXT: vmov.i32 q0, #0x0 +; CHECK-LE-NEXT: .LBB30_3: @ %else +; CHECK-LE-NEXT: lsls r1, r1, #30 +; CHECK-LE-NEXT: it mi +; CHECK-LE-NEXT: vldrmi d1, [r0, #8] +; CHECK-LE-NEXT: add sp, #4 +; CHECK-LE-NEXT: bx lr +; CHECK-LE-NEXT: .p2align 3 +; CHECK-LE-NEXT: @ %bb.4: +; CHECK-LE-NEXT: .LCPI30_0: +; CHECK-LE-NEXT: .long 0 @ double 0 +; CHECK-LE-NEXT: .long 0 +; +; CHECK-BE-LABEL: masked_v2f64_align4_zero: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: .pad #4 +; CHECK-BE-NEXT: sub sp, #4 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: movs r2, #0 +; CHECK-BE-NEXT: vmov r3, s3 +; CHECK-BE-NEXT: vmov r1, s2 +; CHECK-BE-NEXT: vmov r12, s0 +; CHECK-BE-NEXT: rsbs r3, r3, #0 +; CHECK-BE-NEXT: vmov r3, s1 +; CHECK-BE-NEXT: sbcs.w r1, r2, r1 +; CHECK-BE-NEXT: mov.w r1, #0 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r1, #1 +; CHECK-BE-NEXT: rsbs r3, r3, #0 +; CHECK-BE-NEXT: sbcs.w r3, r2, r12 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r2, #1 +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: it ne +; CHECK-BE-NEXT: mvnne r2, #1 +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #31 +; CHECK-BE-NEXT: beq .LBB30_2 +; CHECK-BE-NEXT: @ %bb.1: @ %cond.load +; CHECK-BE-NEXT: vldr d1, .LCPI30_0 +; CHECK-BE-NEXT: vldr d0, [r0] +; CHECK-BE-NEXT: b .LBB30_3 +; CHECK-BE-NEXT: .LBB30_2: +; CHECK-BE-NEXT: vmov.i32 q1, #0x0 +; CHECK-BE-NEXT: vrev64.32 q0, q1 +; CHECK-BE-NEXT: .LBB30_3: @ %else +; CHECK-BE-NEXT: lsls r1, r1, #30 +; CHECK-BE-NEXT: it mi +; CHECK-BE-NEXT: vldrmi d1, [r0, #8] +; CHECK-BE-NEXT: add sp, #4 +; CHECK-BE-NEXT: bx lr +; CHECK-BE-NEXT: .p2align 3 +; CHECK-BE-NEXT: @ %bb.4: +; CHECK-BE-NEXT: .LCPI30_0: +; CHECK-BE-NEXT: .long 0 @ double 0 +; CHECK-BE-NEXT: .long 0 +entry: + %c = icmp sgt <2 x i64> %b, zeroinitializer + %l = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %dest, i32 8, <2 x i1> %c, <2 x double> zeroinitializer) + ret <2 x double> %l +} + +declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>) +declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>) +declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>) +declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>) +declare <8 x half> 
@llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>) +declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>) +declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>) Index: llvm/test/CodeGen/Thumb2/mve-masked-store.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/Thumb2/mve-masked-store.ll @@ -0,0 +1,704 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE +; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE + +define arm_aapcs_vfpcc void @masked_v4i32(<4 x i32> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrwt.32 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrwt.32 q1, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a, <4 x i32>* %dest, i32 4, <4 x i1> %c) + ret void +} + +define arm_aapcs_vfpcc void @masked_v4i32_align1(<4 x i32> *%dest, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_align1: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrbt.8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_align1: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vrev32.8 q0, q1 +; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrbt.8 q0, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <4 x i32> %a, zeroinitializer + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a, <4 x i32>* %dest, i32 1, <4 x i1> %c) + ret void +} + +define i8* @masked_v4i32_pre(i8* %y, i8* %x, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_pre: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrwt.32 q1, [r0, #4] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_pre: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrw.u32 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrwt.32 q1, [r0, #4] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <4 x i32>* + %1 = load <4 x i32>, <4 x i32>* %0, align 4 + %2 = bitcast i8* %z to <4 x i32>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c) + ret i8* %z +} + +define i8* @masked_v4i32_post(i8* %y, i8* %x, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4i32_post: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; 
CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrwt.32 q1, [r0] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4i32_post: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrw.u32 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrwt.32 q1, [r0] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <4 x i32>* + %1 = load <4 x i32>, <4 x i32>* %0, align 4 + %2 = bitcast i8* %y to <4 x i32>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c) + ret i8* %z +} + + +define arm_aapcs_vfpcc void @masked_v8i16(<8 x i16> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrht.16 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrht.16 q1, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %a, <8 x i16>* %dest, i32 2, <8 x i1> %c) + ret void +} + +define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_align1: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrbt.8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_align1: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vrev16.8 q0, q1 +; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrbt.8 q0, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <8 x i16> %a, zeroinitializer + call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %a, <8 x i16>* %dest, i32 1, <8 x i1> %c) + ret void +} + +define i8* @masked_v8i16_pre(i8* %y, i8* %x, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_pre: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrht.16 q1, [r0, #4] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_pre: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrh.u16 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.16 q2, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrht.16 q1, [r0, #4] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <8 x i16>* + %1 = load <8 x i16>, <8 x i16>* %0, align 4 + %2 = bitcast i8* %z to <8 x i16>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c) + ret i8* %z +} + +define i8* @masked_v8i16_post(i8* %y, i8* %x, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8i16_post: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s16 
gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrht.16 q1, [r0] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8i16_post: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrh.u16 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.16 q2, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrht.16 q1, [r0] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <8 x i16>* + %1 = load <8 x i16>, <8 x i16>* %0, align 4 + %2 = bitcast i8* %y to <8 x i16>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c) + ret i8* %z +} + + +define arm_aapcs_vfpcc void @masked_v16i8(<16 x i8> *%dest, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrbt.8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v16i8: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.8 q1, q0 +; CHECK-BE-NEXT: vcmp.s8 gt, q1, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrbt.8 q1, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <16 x i8> %a, zeroinitializer + call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %a, <16 x i8>* %dest, i32 1, <16 x i1> %c) + ret void +} + +define i8* @masked_v16i8_pre(i8* %y, i8* %x, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8_pre: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrbt.8 q1, [r0, #4] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v16i8_pre: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrb.u8 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.8 q2, q0 +; CHECK-BE-NEXT: vcmp.s8 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrbt.8 q1, [r0, #4] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <16 x i8>* + %1 = load <16 x i8>, <16 x i8>* %0, align 4 + %2 = bitcast i8* %z to <16 x i8>* + %c = icmp sgt <16 x i8> %a, zeroinitializer + call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c) + ret i8* %z +} + +define i8* @masked_v16i8_post(i8* %y, i8* %x, <16 x i8> %a) { +; CHECK-LE-LABEL: masked_v16i8_post: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s8 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrbt.8 q1, [r0] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v16i8_post: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrb.u8 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.8 q2, q0 +; CHECK-BE-NEXT: vcmp.s8 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrbt.8 q1, [r0] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <16 x i8>* + %1 = load <16 x i8>, <16 x i8>* %0, align 4 + %2 = bitcast i8* %y to <16 x i8>* + %c = icmp sgt <16 x i8> %a, zeroinitializer + call void 
@llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c) + ret i8* %z +} + + +define arm_aapcs_vfpcc void @masked_v4f32(<4 x float> *%dest, <4 x float> %a, <4 x i32> %b) { +; CHECK-LE-LABEL: masked_v4f32: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.i32 ne, q1, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrwt.32 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q2, q1 +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vcmp.i32 ne, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrwt.32 q1, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp ugt <4 x i32> %b, zeroinitializer + call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %a, <4 x float>* %dest, i32 4, <4 x i1> %c) + ret void +} + +define arm_aapcs_vfpcc void @masked_v4f32_align1(<4 x float> *%dest, <4 x float> %a, <4 x i32> %b) { +; CHECK-LE-LABEL: masked_v4f32_align1: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.i32 ne, q1, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrbt.8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_align1: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.32 q2, q1 +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vrev32.8 q0, q1 +; CHECK-BE-NEXT: vcmp.i32 ne, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrbt.8 q0, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp ugt <4 x i32> %b, zeroinitializer + call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %a, <4 x float>* %dest, i32 1, <4 x i1> %c) + ret void +} + +define i8* @masked_v4f32_pre(i8* %y, i8* %x, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4f32_pre: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrwt.32 q1, [r0, #4] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_pre: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrw.u32 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrwt.32 q1, [r0, #4] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <4 x float>* + %1 = load <4 x float>, <4 x float>* %0, align 4 + %2 = bitcast i8* %z to <4 x float>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c) + ret i8* %z +} + +define i8* @masked_v4f32_post(i8* %y, i8* %x, <4 x i32> %a) { +; CHECK-LE-LABEL: masked_v4f32_post: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrwt.32 q1, [r0] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v4f32_post: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrw.u32 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vcmp.s32 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrwt.32 q1, [r0] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <4 x float>* + %1 = load <4 x float>, <4 x 
float>* %0, align 4 + %2 = bitcast i8* %y to <4 x float>* + %c = icmp sgt <4 x i32> %a, zeroinitializer + call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c) + ret i8* %z +} + + +define arm_aapcs_vfpcc void @masked_v8f16(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) { +; CHECK-LE-LABEL: masked_v8f16: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.i16 ne, q1, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrht.16 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q2, q1 +; CHECK-BE-NEXT: vrev64.16 q1, q0 +; CHECK-BE-NEXT: vcmp.i16 ne, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrht.16 q1, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp ugt <8 x i16> %b, zeroinitializer + call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %a, <8 x half>* %dest, i32 2, <8 x i1> %c) + ret void +} + +define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) { +; CHECK-LE-LABEL: masked_v8f16_align1: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vcmp.i16 ne, q1, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrbt.8 q0, [r0] +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_align1: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vrev64.16 q2, q0 +; CHECK-BE-NEXT: vrev16.8 q0, q2 +; CHECK-BE-NEXT: vrev64.16 q2, q1 +; CHECK-BE-NEXT: vcmp.i16 ne, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrbt.8 q0, [r0] +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp ugt <8 x i16> %b, zeroinitializer + call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %a, <8 x half>* %dest, i32 1, <8 x i1> %c) + ret void +} + +define i8* @masked_v8f16_pre(i8* %y, i8* %x, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8f16_pre: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrht.16 q1, [r0, #4] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_pre: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrh.u16 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.16 q2, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrht.16 q1, [r0, #4] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <8 x half>* + %1 = load <8 x half>, <8 x half>* %0, align 4 + %2 = bitcast i8* %z to <8 x half>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c) + ret i8* %z +} + +define i8* @masked_v8f16_post(i8* %y, i8* %x, <8 x i16> %a) { +; CHECK-LE-LABEL: masked_v8f16_post: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: vldr d1, [sp] +; CHECK-LE-NEXT: vldrw.u32 q1, [r1] +; CHECK-LE-NEXT: vmov d0, r2, r3 +; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr +; CHECK-LE-NEXT: vpst +; CHECK-LE-NEXT: vstrht.16 q1, [r0] +; CHECK-LE-NEXT: adds r0, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v8f16_post: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: vldr d1, [sp] +; CHECK-BE-NEXT: vldrh.u16 q1, [r1] +; CHECK-BE-NEXT: vmov d0, r3, r2 +; CHECK-BE-NEXT: vrev64.16 q2, q0 +; CHECK-BE-NEXT: vcmp.s16 gt, q2, zr +; CHECK-BE-NEXT: vpst +; CHECK-BE-NEXT: vstrht.16 q1, [r0] +; CHECK-BE-NEXT: adds r0, #4 +; CHECK-BE-NEXT: bx lr +entry: + %z = 
getelementptr inbounds i8, i8* %y, i32 4 + %0 = bitcast i8* %x to <8 x half>* + %1 = load <8 x half>, <8 x half>* %0, align 4 + %2 = bitcast i8* %y to <8 x half>* + %c = icmp sgt <8 x i16> %a, zeroinitializer + call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c) + ret i8* %z +} + + +define arm_aapcs_vfpcc void @masked_v2i64(<2 x i64> *%dest, <2 x i64> %a) { +; CHECK-LE-LABEL: masked_v2i64: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: .pad #4 +; CHECK-LE-NEXT: sub sp, #4 +; CHECK-LE-NEXT: vmov r2, s0 +; CHECK-LE-NEXT: movs r3, #0 +; CHECK-LE-NEXT: vmov r1, s1 +; CHECK-LE-NEXT: vmov r12, s3 +; CHECK-LE-NEXT: rsbs r2, r2, #0 +; CHECK-LE-NEXT: vmov r2, s2 +; CHECK-LE-NEXT: sbcs.w r1, r3, r1 +; CHECK-LE-NEXT: mov.w r1, #0 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r1, #1 +; CHECK-LE-NEXT: rsbs r2, r2, #0 +; CHECK-LE-NEXT: sbcs.w r2, r3, r12 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r3, #1 +; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: it ne +; CHECK-LE-NEXT: mvnne r3, #1 +; CHECK-LE-NEXT: bfi r3, r1, #0, #1 +; CHECK-LE-NEXT: and r1, r3, #3 +; CHECK-LE-NEXT: lsls r2, r3, #31 +; CHECK-LE-NEXT: ittt ne +; CHECK-LE-NEXT: vmovne r2, s1 +; CHECK-LE-NEXT: vmovne r3, s0 +; CHECK-LE-NEXT: strdne r3, r2, [r0] +; CHECK-LE-NEXT: lsls r1, r1, #30 +; CHECK-LE-NEXT: ittt mi +; CHECK-LE-NEXT: vmovmi r1, s3 +; CHECK-LE-NEXT: vmovmi r2, s2 +; CHECK-LE-NEXT: strdmi r2, r1, [r0, #8] +; CHECK-LE-NEXT: add sp, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v2i64: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: .pad #4 +; CHECK-BE-NEXT: sub sp, #4 +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: movs r3, #0 +; CHECK-BE-NEXT: vmov r2, s7 +; CHECK-BE-NEXT: vmov r1, s6 +; CHECK-BE-NEXT: vmov r12, s4 +; CHECK-BE-NEXT: rsbs r2, r2, #0 +; CHECK-BE-NEXT: vmov r2, s5 +; CHECK-BE-NEXT: sbcs.w r1, r3, r1 +; CHECK-BE-NEXT: mov.w r1, #0 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r1, #1 +; CHECK-BE-NEXT: rsbs r2, r2, #0 +; CHECK-BE-NEXT: sbcs.w r2, r3, r12 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r3, #1 +; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: it ne +; CHECK-BE-NEXT: mvnne r3, #1 +; CHECK-BE-NEXT: bfi r3, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r3, #3 +; CHECK-BE-NEXT: lsls r2, r3, #31 +; CHECK-BE-NEXT: bne .LBB19_3 +; CHECK-BE-NEXT: @ %bb.1: @ %else +; CHECK-BE-NEXT: lsls r1, r1, #30 +; CHECK-BE-NEXT: bmi .LBB19_4 +; CHECK-BE-NEXT: .LBB19_2: @ %else2 +; CHECK-BE-NEXT: add sp, #4 +; CHECK-BE-NEXT: bx lr +; CHECK-BE-NEXT: .LBB19_3: @ %cond.store +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vmov r2, s5 +; CHECK-BE-NEXT: vmov r3, s4 +; CHECK-BE-NEXT: strd r3, r2, [r0] +; CHECK-BE-NEXT: lsls r1, r1, #30 +; CHECK-BE-NEXT: bpl .LBB19_2 +; CHECK-BE-NEXT: .LBB19_4: @ %cond.store1 +; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vmov r1, s7 +; CHECK-BE-NEXT: vmov r2, s6 +; CHECK-BE-NEXT: strd r2, r1, [r0, #8] +; CHECK-BE-NEXT: add sp, #4 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <2 x i64> %a, zeroinitializer + call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %a, <2 x i64>* %dest, i32 8, <2 x i1> %c) + ret void +} + +define arm_aapcs_vfpcc void @masked_v2f64(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) { +; CHECK-LE-LABEL: masked_v2f64: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: .pad #4 +; CHECK-LE-NEXT: sub sp, #4 +; CHECK-LE-NEXT: vmov r2, s4 +; CHECK-LE-NEXT: movs r3, #0 +; CHECK-LE-NEXT: vmov r1, s5 +; CHECK-LE-NEXT: vmov r12, s7 +; CHECK-LE-NEXT: rsbs r2, r2, #0 +; CHECK-LE-NEXT: vmov r2, s6 +; CHECK-LE-NEXT: sbcs.w 
r1, r3, r1 +; CHECK-LE-NEXT: mov.w r1, #0 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r1, #1 +; CHECK-LE-NEXT: rsbs r2, r2, #0 +; CHECK-LE-NEXT: sbcs.w r2, r3, r12 +; CHECK-LE-NEXT: it lt +; CHECK-LE-NEXT: movlt r3, #1 +; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: it ne +; CHECK-LE-NEXT: mvnne r3, #1 +; CHECK-LE-NEXT: bfi r3, r1, #0, #1 +; CHECK-LE-NEXT: and r1, r3, #3 +; CHECK-LE-NEXT: lsls r2, r3, #31 +; CHECK-LE-NEXT: it ne +; CHECK-LE-NEXT: vstrne d0, [r0] +; CHECK-LE-NEXT: lsls r1, r1, #30 +; CHECK-LE-NEXT: it mi +; CHECK-LE-NEXT: vstrmi d1, [r0, #8] +; CHECK-LE-NEXT: add sp, #4 +; CHECK-LE-NEXT: bx lr +; +; CHECK-BE-LABEL: masked_v2f64: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: .pad #4 +; CHECK-BE-NEXT: sub sp, #4 +; CHECK-BE-NEXT: vrev64.32 q2, q1 +; CHECK-BE-NEXT: movs r3, #0 +; CHECK-BE-NEXT: vmov r2, s11 +; CHECK-BE-NEXT: vmov r1, s10 +; CHECK-BE-NEXT: vmov r12, s8 +; CHECK-BE-NEXT: rsbs r2, r2, #0 +; CHECK-BE-NEXT: vmov r2, s9 +; CHECK-BE-NEXT: sbcs.w r1, r3, r1 +; CHECK-BE-NEXT: mov.w r1, #0 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r1, #1 +; CHECK-BE-NEXT: rsbs r2, r2, #0 +; CHECK-BE-NEXT: sbcs.w r2, r3, r12 +; CHECK-BE-NEXT: it lt +; CHECK-BE-NEXT: movlt r3, #1 +; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: it ne +; CHECK-BE-NEXT: mvnne r3, #1 +; CHECK-BE-NEXT: bfi r3, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r3, #3 +; CHECK-BE-NEXT: lsls r2, r3, #31 +; CHECK-BE-NEXT: it ne +; CHECK-BE-NEXT: vstrne d0, [r0] +; CHECK-BE-NEXT: lsls r1, r1, #30 +; CHECK-BE-NEXT: it mi +; CHECK-BE-NEXT: vstrmi d1, [r0, #8] +; CHECK-BE-NEXT: add sp, #4 +; CHECK-BE-NEXT: bx lr +entry: + %c = icmp sgt <2 x i64> %b, zeroinitializer + call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %a, <2 x double>* %dest, i32 8, <2 x i1> %c) + ret void +} + + +declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>) +declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>) +declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>) +declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>) +declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>) +declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>) +declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)