Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6900,6 +6900,35 @@
                              DAG.getZExtOrTrunc(Const, getCurSDLoc(), PtrVT)));
     return;
   }
+  case Intrinsic::get_active_lane_mask: {
+    auto DL = getCurSDLoc();
+    SDValue Index = getValue(I.getOperand(0));
+    SDValue BTC = getValue(I.getOperand(1));
+    Type *ElementTy = I.getOperand(0)->getType();
+    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+    unsigned VecWidth = VT.getVectorNumElements();
+
+    SmallVector<SDValue, 16> OpsBTC;
+    SmallVector<SDValue, 16> OpsIndex;
+    SmallVector<SDValue, 16> OpsStepConstants;
+    for (unsigned i = 0; i < VecWidth; i++) {
+      OpsBTC.push_back(BTC);
+      OpsIndex.push_back(Index);
+      OpsStepConstants.push_back(DAG.getConstant(i, DL, MVT::getVT(ElementTy)));
+    }
+
+    EVT CCVT = MVT::i1;
+    auto VecTy = MVT::getVT(FixedVectorType::get(ElementTy, VecWidth));
+    SDValue VectorIndex = DAG.getBuildVector(VecTy, DL, OpsIndex);
+    SDValue VectorStep = DAG.getBuildVector(VecTy, DL, OpsStepConstants);
+    SDValue VectorInduction =
+        DAG.getNode(ISD::ADD, DL, VecTy, VectorIndex, VectorStep);
+    SDValue VectorBTC = DAG.getBuildVector(VecTy, DL, OpsBTC);
+    CCVT = EVT::getVectorVT(I.getContext(), CCVT, VecWidth);
+    setValue(&I, DAG.getSetCC(DL, CCVT, VectorInduction, VectorBTC,
+                              ISD::CondCode::SETULE));
+    return;
+  }
   }
 }
Index: llvm/test/CodeGen/Thumb2/active_lane_mask.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/active_lane_mask.ll
@@ -0,0 +1,226 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve %s -o - | FileCheck %s
+
+define <4 x i32> @v4i32(i32 %index, i32 %BTC, <4 x i32> %V1, <4 x i32> %V2) {
+; CHECK-LABEL: v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    adr.w r12, .LCPI0_0
+; CHECK-NEXT:    vdup.32 q1, r1
+; CHECK-NEXT:    vldrw.u32 q0, [r12]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    add r0, sp, #8
+; CHECK-NEXT:    vcmp.u32 cs, q1, q0
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    vldr d1, [sp]
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %BTC)
+  %select = select <4 x i1> %active.lane.mask, <4 x i32> %V1, <4 x i32> %V2
+  ret <4 x i32> %select
+}
+
+define <8 x i16> @v8i16(i32 %index, i32 %BTC, <8 x i16> %V1, <8 x i16> %V2) {
+; CHECK-LABEL: v8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    adr.w r12, .LCPI1_0
+; CHECK-NEXT:    vdup.32 q1, r1
+; CHECK-NEXT:    vldrw.u32 q0, [r12]
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vcmp.u32 cs, q1, q0
+; CHECK-NEXT:    vpsel q4, q3, q2
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vmov.16 q0[0], r1
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vmov.16 q0[2], r1
+; CHECK-NEXT:    vmov r1, s19
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    adr r1, .LCPI1_1
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    vadd.i32 q4, q4, r0
+; CHECK-NEXT:    vcmp.u32 cs, q1, q4
+; CHECK-NEXT:    vpsel q1, q3, q2
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    add r0, sp, #24
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    vldr d1, [sp, #16]
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI1_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+; CHECK-NEXT:  .LCPI1_1:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %BTC)
+  %select = select <8 x i1> %active.lane.mask, <8 x i16> %V1, <8 x i16> %V2
+  ret <8 x i16> %select
+}
+
+define <16 x i8> @v16i8(i32 %index, i32 %BTC, <16 x i8> %V1, <16 x i8> %V2) {
+; CHECK-LABEL: v16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    adr.w r12, .LCPI2_0
+; CHECK-NEXT:    vdup.32 q3, r1
+; CHECK-NEXT:    vldrw.u32 q0, [r12]
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vcmp.u32 cs, q3, q0
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vpsel q4, q1, q0
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov r1, s19
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    adr r1, .LCPI2_1
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    vadd.i32 q4, q4, r0
+; CHECK-NEXT:    vcmp.u32 cs, q3, q4
+; CHECK-NEXT:    vpsel q4, q1, q0
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov r1, s19
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q4, q1, q0
+; CHECK-NEXT:    vmov.u16 r1, q4[0]
+; CHECK-NEXT:    vmov.8 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[1]
+; CHECK-NEXT:    vmov.8 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[2]
+; CHECK-NEXT:    vmov.8 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[3]
+; CHECK-NEXT:    vmov.8 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[4]
+; CHECK-NEXT:    vmov.8 q2[4], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[5]
+; CHECK-NEXT:    vmov.8 q2[5], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[6]
+; CHECK-NEXT:    vmov.8 q2[6], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[7]
+; CHECK-NEXT:    vmov.8 q2[7], r1
+; CHECK-NEXT:    adr r1, .LCPI2_2
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    vadd.i32 q4, q4, r0
+; CHECK-NEXT:    vcmp.u32 cs, q3, q4
+; CHECK-NEXT:    vpsel q5, q1, q0
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vmov.16 q4[0], r1
+; CHECK-NEXT:    vmov r1, s21
+; CHECK-NEXT:    vmov.16 q4[1], r1
+; CHECK-NEXT:    vmov r1, s22
+; CHECK-NEXT:    vmov.16 q4[2], r1
+; CHECK-NEXT:    vmov r1, s23
+; CHECK-NEXT:    vmov.16 q4[3], r1
+; CHECK-NEXT:    adr r1, .LCPI2_3
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vadd.i32 q5, q5, r0
+; CHECK-NEXT:    vcmp.u32 cs, q3, q5
+; CHECK-NEXT:    vpsel q3, q1, q0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov.16 q4[4], r0
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    vmov.16 q4[5], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov.16 q4[6], r0
+; CHECK-NEXT:    vmov r0, s15
+; CHECK-NEXT:    vmov.16 q4[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q0, q1, q0
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.8 q2[8], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.8 q2[9], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.8 q2[10], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmov.8 q2[11], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmov.8 q2[12], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.8 q2[13], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.8 q2[14], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.8 q2[15], r0
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    add r0, sp, #40
+; CHECK-NEXT:    vldr d1, [sp, #32]
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vcmp.i8 ne, q2, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI2_0:
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 2 @ 0x2
+; CHECK-NEXT:    .long 3 @ 0x3
+; CHECK-NEXT:  .LCPI2_1:
+; CHECK-NEXT:    .long 4 @ 0x4
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 6 @ 0x6
+; CHECK-NEXT:    .long 7 @ 0x7
+; CHECK-NEXT:  .LCPI2_2:
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 9 @ 0x9
+; CHECK-NEXT:    .long 10 @ 0xa
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:  .LCPI2_3:
+; CHECK-NEXT:    .long 12 @ 0xc
+; CHECK-NEXT:    .long 13 @ 0xd
+; CHECK-NEXT:    .long 14 @ 0xe
+; CHECK-NEXT:    .long 15 @ 0xf
+  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %BTC)
+  %select = select <16 x i1> %active.lane.mask, <16 x i8> %V1, <16 x i8> %V2
+  ret <16 x i8> %select
+}
+
+declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
+declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
+declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
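
For reference, the per-lane semantics that the SETULE-based lowering above implements can be modeled in a few lines of scalar C++. This is an illustrative sketch, not part of the patch: getActiveLaneMaskRef is a hypothetical helper name, and it assumes the i32 operand type used in the tests. Lane i of the result is active iff (index + i) <= BTC as an unsigned comparison, where BTC is the loop's backedge-taken count; this mirrors the splat of the index, the add of the step vector <0, 1, ..., N-1>, and the unsigned-<= compare against the splatted BTC built in SelectionDAGBuilder.

  #include <array>
  #include <cstdint>
  #include <cstdio>

  // Reference model (hypothetical, for illustration only): lane i is active
  // iff (Index + i) <= BTC in unsigned arithmetic, matching the ISD::SETULE
  // node that the new lowering builds from the splatted index, the step
  // vector, and the splatted backedge-taken count.
  template <unsigned N>
  std::array<bool, N> getActiveLaneMaskRef(uint32_t Index, uint32_t BTC) {
    std::array<bool, N> Mask;
    for (unsigned i = 0; i < N; ++i)
      Mask[i] = (Index + i) <= BTC; // wraps modulo 2^32, like the vector add
    return Mask;
  }

  int main() {
    // A loop with 10 iterations has BTC = 9; at index 8 the second <4 x i1>
    // mask should enable only the lanes for iterations 8 and 9.
    for (bool Active : getActiveLaneMaskRef<4>(8, 9))
      printf("%d ", Active); // prints: 1 1 0 0
    printf("\n");
    return 0;
  }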