diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp
--- a/llvm/lib/Target/ARM/MVETailPredication.cpp
+++ b/llvm/lib/Target/ARM/MVETailPredication.cpp
@@ -236,7 +236,8 @@
   unsigned ID = I->getIntrinsicID();
   FixedVectorType *VecTy;
   if (ID == Intrinsic::masked_load || isGather(I)) {
-    if (ID == Intrinsic::arm_mve_vldr_gather_base_wb_predicated)
+    if (ID == Intrinsic::arm_mve_vldr_gather_base_wb ||
+        ID == Intrinsic::arm_mve_vldr_gather_base_wb_predicated)
       // then the type is a StructType
       VecTy = dyn_cast<FixedVectorType>(I->getType()->getContainedType(0));
     else
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll b/llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -enable-arm-maskedgatscat -tail-predication=force-enabled %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: gather_inc_v4i32_simple:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    cmp r2, #1
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    poplt {r4, pc}
+; CHECK-NEXT:    bic r12, r2, #3
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    sub.w lr, r12, #4
+; CHECK-NEXT:    add.w r4, r3, lr, lsr #2
+; CHECK-NEXT:    adr r3, .LCPI0_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:  .LBB0_1: @ %vector.ph
+; CHECK-NEXT:    @ =>This Loop Header: Depth=1
+; CHECK-NEXT:    @ Child Loop BB0_2 Depth 2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    vmov q1, q0
+; CHECK-NEXT:    dls lr, r4
+; CHECK-NEXT:  .LBB0_2: @ %vector.body
+; CHECK-NEXT:    @ Parent Loop BB0_1 Depth=1
+; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
+; CHECK-NEXT:    vldrw.u32 q2, [q1, #16]!
+; CHECK-NEXT:    vstrb.8 q2, [r0], #16
+; CHECK-NEXT:    le lr, .LBB0_2
+; CHECK-NEXT:  @ %bb.3: @ %middle.block
+; CHECK-NEXT:    @ in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    cmp r12, r2
+; CHECK-NEXT:    bne .LBB0_1
+; CHECK-NEXT:  @ %bb.4: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.5:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 4294967280 @ 0xfffffff0
+; CHECK-NEXT:    .long 4294967284 @ 0xfffffff4
+; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
+; CHECK-NEXT:    .long 4294967292 @ 0xfffffffc
+entry:
+  %cmp22 = icmp sgt i32 %n, 0
+  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %1 = getelementptr inbounds i32, i32* %dst, i32 %index
+  %2 = bitcast i32* %1 to <4 x i32>*
+  store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
+  %index.next = add i32 %index, 4
+  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  ret void
+}
+
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)