Index: llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -1506,10 +1506,30 @@
       llvm_unreachable("trying to unpredicate a non-predicated instruction");
   };
 
+  MachineInstr *VCMP = nullptr;
   for (auto &Block : LoLoop.getVPTBlocks()) {
     SmallVectorImpl<MachineInstr *> &Insts = Block.getInsts();
 
-    if (VPTState::isEntryPredicatedOnVCTP(Block, /*exclusive*/true)) {
+    auto ReplaceVCMPWithVPT = [&]() {
+      assert(VCMP && "Replacing a removed or non-existent VCMP");
+      // Replace the VCMP with a VPT
+      MachineInstrBuilder MIB =
+          BuildMI(*VCMP->getParent(), VCMP, VCMP->getDebugLoc(),
+                  TII->get(VCMPOpcodeToVPT(VCMP->getOpcode())));
+      MIB.addImm(ARMVCC::Then);
+      // Register one
+      MIB.add(VCMP->getOperand(1));
+      // Register two
+      MIB.add(VCMP->getOperand(2));
+      // The comparison code, e.g. ge, eq, lt
+      MIB.add(VCMP->getOperand(3));
+      LLVM_DEBUG(dbgs() << "ARM Loops: Combining with VCMP to VPT: " << *MIB);
+      LoLoop.BlockMasksToRecompute.insert(MIB.getInstr());
+      LoLoop.ToRemove.insert(VCMP);
+      VCMP = nullptr;
+    };
+
+    if (VPTState::isEntryPredicatedOnVCTP(Block, /*exclusive*/ true)) {
       if (VPTState::hasUniformPredicate(Block)) {
         // A vpt block starting with VPST, is only predicated upon vctp and has no
         // internal vpr defs:
@@ -1541,26 +1561,7 @@
 
       // Check if the instruction defining vpr is a vcmp so it can be combined
       // with the VPST This should be the divergent instruction
-      MachineInstr *VCMP =
-          VCMPOpcodeToVPT(Divergent->getOpcode()) != 0 ? Divergent : nullptr;
-
-      auto ReplaceVCMPWithVPT = [&]() {
-        // Replace the VCMP with a VPT
-        MachineInstrBuilder MIB = BuildMI(
-            *Divergent->getParent(), Divergent, Divergent->getDebugLoc(),
-            TII->get(VCMPOpcodeToVPT(VCMP->getOpcode())));
-        MIB.addImm(ARMVCC::Then);
-        // Register one
-        MIB.add(VCMP->getOperand(1));
-        // Register two
-        MIB.add(VCMP->getOperand(2));
-        // The comparison code, e.g. ge, eq, lt
-        MIB.add(VCMP->getOperand(3));
-        LLVM_DEBUG(dbgs()
-                   << "ARM Loops: Combining with VCMP to VPT: " << *MIB);
-        LoLoop.BlockMasksToRecompute.insert(MIB.getInstr());
-        LoLoop.ToRemove.insert(VCMP);
-      };
+      VCMP = VCMPOpcodeToVPT(Divergent->getOpcode()) != 0 ? Divergent : VCMP;
 
       // If the VCMP exists and it has the same register values as the VPST,
       // replace both with a VPT
@@ -1595,6 +1596,17 @@
       // The vctp will be removed, so the block mask of the vp(s)t will need
       // to be recomputed.
       LoLoop.BlockMasksToRecompute.insert(Insts.front());
+    } else if (VCMP && Insts.front()->getOpcode() == ARM::MVE_VPST) {
+      // Merge this block's VPST and a dangling VCMP from a previous block if
+      // possible
+      MachineInstr *VPST = Insts.front();
+      if (RDA->hasSameReachingDef(VCMP, VPST, VCMP->getOperand(1).getReg()) &&
+          RDA->hasSameReachingDef(VCMP, VPST, VCMP->getOperand(2).getReg()) &&
+          RDA->getUniqueReachingMIDef(VPST, ARM::VPR) == VCMP) {
+        ReplaceVCMPWithVPT();
+        LLVM_DEBUG(dbgs() << "ARM Loops: Removing VPST: " << *VPST);
+        LoLoop.ToRemove.insert(VPST);
+      }
     }
   }
 
Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination-across-blocks.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination-across-blocks.mir
@@ -0,0 +1,306 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
+
+--- |
+  define void @arm_max_no_idx_f32(float* nocapture readonly %pSrc, i32 %blockSize, float* nocapture %pResult) {
+  entry:
+    %shr = lshr i32 %blockSize, 2
+    %cmp.not26 = icmp eq i32 %shr, 0
+    %0 = call i1 @llvm.test.set.loop.iterations.i32(i32 %shr)
+    br i1 %0, label %while.body.preheader, label %while.end
+
+  while.body.preheader:                             ; preds = %entry
+    %1 = and i32 %blockSize, -4
+    br label %while.body
+
+  while.body:                                       ; preds = %while.body, %while.body.preheader
+    %pSrc.addr.028 = phi float* [ %add.ptr, %while.body ], [ %pSrc, %while.body.preheader ]
+    %curExtremValVec.027 = phi <4 x float> [ %5, %while.body ], [ <float 0xC7EFFFFFE0000000, float 0xC7EFFFFFE0000000, float 0xC7EFFFFFE0000000, float 0xC7EFFFFFE0000000>, %while.body.preheader ]
+    %2 = phi i32 [ %shr, %while.body.preheader ], [ %6, %while.body ]
+    %3 = bitcast float* %pSrc.addr.028 to <4 x float>*
+    %4 = load <4 x float>, <4 x float>* %3, align 4
+    %5 = tail call fast <4 x float> @llvm.maxnum.v4f32(<4 x float> %4, <4 x float> %curExtremValVec.027)
+    %add.ptr = getelementptr inbounds float, float* %pSrc.addr.028, i32 4
+    %6 = call i32 @llvm.loop.decrement.reg.i32(i32 %2, i32 1)
+    %7 = icmp ne i32 %6, 0
+    br i1 %7, label %while.body, label %while.end.loopexit
+
+  while.end.loopexit:                               ; preds = %while.body
+    %scevgep = getelementptr float, float* %pSrc, i32 %1
+    br label %while.end
+
+  while.end:                                        ; preds = %while.end.loopexit, %entry
+    %curExtremValVec.0.lcssa = phi <4 x float> [ <float 0xC7EFFFFFE0000000, float 0xC7EFFFFFE0000000, float 0xC7EFFFFFE0000000, float 0xC7EFFFFFE0000000>, %entry ], [ %5, %while.end.loopexit ]
+    %pSrc.addr.0.lcssa = phi float* [ %pSrc, %entry ], [ %scevgep, %while.end.loopexit ]
+    %8 = tail call fast float @llvm.arm.mve.maxnmv.f32.v4f32(float 0xC7EFFFFFE0000000, <4 x float> %curExtremValVec.0.lcssa)
+    %9 = bitcast float %8 to i32
+    %and = and i32 %blockSize, 3
+    %cmp2.not22 = icmp eq i32 %and, 0
+    %10 = add nuw nsw i32 %and, 3
+    %11 = lshr i32 %10, 2
+    %12 = shl nuw nsw i32 %11, 2
+    %13 = add nsw i32 %12, -4
+    %14 = lshr i32 %13, 2
+    %15 = add nuw nsw i32 %14, 1
+    br i1 %cmp2.not22, label %while.end6, label %vector.ph
+
+  vector.ph:                                        ; preds = %while.end
+    %16 = insertelement <4 x i32> undef, i32 %9, i64 0
+    %17 = shufflevector <4 x i32> %16, <4 x i32> undef, <4 x i32> zeroinitializer
+    %18 = bitcast <4 x i32> %17 to <4 x float>
+    call void @llvm.set.loop.iterations.i32(i32 %15)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv1 = phi float* [ %scevgep2, %vector.body ], [ %pSrc.addr.0.lcssa, %vector.ph ]
+    %vec.phi = phi <4 x float> [ %18, %vector.ph ], [ %25, %vector.body ]
+    %19 = phi i32 [ %15, %vector.ph ], [ %26, %vector.body ]
+    %20 = phi i32 [ %and, %vector.ph ], [ %22, %vector.body ]
+    %lsr.iv13 = bitcast float* %lsr.iv1 to <4 x float>*
+    %21 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %20)
+    %22 = sub i32 %20, 4
+    %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %lsr.iv13, i32 4, <4 x i1> %21, <4 x float> undef)
+    %23 = fcmp fast olt <4 x float> %vec.phi, %wide.masked.load
+    %24 = and <4 x i1> %21, %23
+    %25 = select <4 x i1> %24, <4 x float> %wide.masked.load, <4 x float> %vec.phi
+    %scevgep2 = getelementptr float, float* %lsr.iv1, i32 4
+    %26 = call i32 @llvm.loop.decrement.reg.i32(i32 %19, i32 1)
+    %27 = icmp ne i32 %26, 0
+    br i1 %27, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %28 = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %25)
+    br label %while.end6
+
+  while.end6:                                       ; preds = %middle.block, %while.end
+    %maxValue.0.lcssa = phi float [ %8, %while.end ], [ %28, %middle.block ]
+    store float %maxValue.0.lcssa, float* %pResult, align 4
+    ret void
+  }
+
+  declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
+
+  declare float @llvm.arm.mve.maxnmv.f32.v4f32(float, <4 x float>)
+
+  declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
+
+  declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+
+  declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)
+
+  declare void @llvm.set.loop.iterations.i32(i32)
+
+  declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
+
+  declare i1 @llvm.test.set.loop.iterations.i32(i32)
+
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32)
+
+...
+---
+name:            arm_max_no_idx_f32
+alignment:       8
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+debugValueSubstitutions: []
+constants:
+  - id:              0
+    value:           float 0xC7EFFFFFE0000000
+    alignment:       4
+    isTargetSpecific: false
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: arm_max_no_idx_f32
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x40000000), %bb.4(0x40000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r7
+  ; CHECK:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   renamable $lr = t2LSRri renamable $r1, 2, 14 /* CC::al */, $noreg, $noreg
+  ; CHECK:   renamable $q0 = MVE_VMVNimmi32 1152, 0, $noreg, undef renamable $q0
+  ; CHECK:   $lr = t2WLS killed renamable $lr, %bb.4
+  ; CHECK: bb.1.while.body.preheader:
+  ; CHECK:   successors: %bb.2(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2
+  ; CHECK:   renamable $q0 = MVE_VMVNimmi32 1152, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $r12 = t2BICri renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
+  ; CHECK:   $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+  ; CHECK: bb.2.while.body (align 4):
+  ; CHECK:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+  ; CHECK:   liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r12
+  ; CHECK:   renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 0, $noreg :: (load 16 from %ir.3, align 4)
+  ; CHECK:   renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VMAXNMf32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.2
+  ; CHECK: bb.3.while.end.loopexit:
+  ; CHECK:   successors: %bb.4(0x80000000)
+  ; CHECK:   liveins: $q0, $r0, $r1, $r2, $r12
+  ; CHECK:   renamable $r0 = t2ADDrs killed renamable $r0, killed renamable $r12, 18, 14 /* CC::al */, $noreg, $noreg
+  ; CHECK: bb.4.while.end:
+  ; CHECK:   successors: %bb.8(0x30000000), %bb.5(0x50000000)
+  ; CHECK:   liveins: $q0, $r0, $r1, $r2
+  ; CHECK:   renamable $s4 = VLDRS %const.0, 0, 14 /* CC::al */, $noreg :: (load 4 from constant-pool)
+  ; CHECK:   renamable $r1 = t2ANDri killed renamable $r1, 3, 14 /* CC::al */, $noreg, def $cpsr
+  ; CHECK:   $r12 = VMOVRS killed $s4, 14 /* CC::al */, $noreg
+  ; CHECK:   renamable $r12 = MVE_VMAXNMVf32 killed renamable $r12, killed renamable $q0, 0, $noreg
+  ; CHECK:   tBcc %bb.8, 0 /* CC::eq */, killed $cpsr
+  ; CHECK: bb.5.vector.ph:
+  ; CHECK:   successors: %bb.6(0x80000000)
+  ; CHECK:   liveins: $r0, $r1, $r2, $r12
+  ; CHECK:   renamable $q0 = MVE_VDUP32 killed renamable $r12, 0, $noreg, undef renamable $q0
+  ; CHECK:   $lr = MVE_DLSTP_32 killed renamable $r1
+  ; CHECK: bb.6.vector.body (align 4):
+  ; CHECK:   successors: %bb.6(0x7c000000), %bb.7(0x04000000)
+  ; CHECK:   liveins: $lr, $q0, $r0, $r2
+  ; CHECK:   renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg :: (load 16 from %ir.lsr.iv13, align 4)
+  ; CHECK:   MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
+  ; CHECK:   renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, killed renamable $q0
+  ; CHECK:   $lr = MVE_LETP killed renamable $lr, %bb.6
+  ; CHECK: bb.7.middle.block:
+  ; CHECK:   liveins: $q0, $r2
+  ; CHECK:   renamable $s4 = nnan VFP_VMAXNMS renamable $s2, renamable $s3
+  ; CHECK:   renamable $s0 = nnan VFP_VMAXNMS killed renamable $s0, killed renamable $s1, implicit killed $q0
+  ; CHECK:   renamable $s0 = nnan VFP_VMAXNMS killed renamable $s0, killed renamable $s4
+  ; CHECK:   VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.pResult)
+  ; CHECK:   frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+  ; CHECK: bb.8:
+  ; CHECK:   liveins: $r2, $r12
+  ; CHECK:   $s0 = VMOVSR killed $r12, 14 /* CC::al */, $noreg
+  ; CHECK:   VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.pResult)
+  ; CHECK:   frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+  ; CHECK: bb.9 (align 4):
+  ; CHECK:   CONSTPOOL_ENTRY 0, %const.0, 4
+  bb.0.entry:
+    successors: %bb.1(0x40000000), %bb.4(0x40000000)
+    liveins: $r0, $r1, $r2, $r7, $lr
+
+    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    renamable $lr = t2LSRri renamable $r1, 2, 14 /* CC::al */, $noreg, $noreg
+    renamable $q0 = MVE_VMVNimmi32 1152, 0, $noreg, undef renamable $q0
+    t2WhileLoopStart renamable $lr, %bb.4, implicit-def dead $cpsr
+    tB %bb.1, 14 /* CC::al */, $noreg
+
+  bb.1.while.body.preheader:
+    successors: %bb.2(0x80000000)
+    liveins: $lr, $r0, $r1, $r2
+
+    renamable $q0 = MVE_VMVNimmi32 1152, 0, $noreg, undef renamable $q0
+    renamable $r12 = t2BICri renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
+    $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+
+  bb.2.while.body (align 4):
+    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+    liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r12
+
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 0, $noreg :: (load 16 from %ir.3, align 4)
+    renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VMAXNMf32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
+    tB %bb.3, 14 /* CC::al */, $noreg
+
+  bb.3.while.end.loopexit:
+    successors: %bb.4(0x80000000)
+    liveins: $q0, $r0, $r1, $r2, $r12
+
+    renamable $r0 = t2ADDrs killed renamable $r0, killed renamable $r12, 18, 14 /* CC::al */, $noreg, $noreg
+
+  bb.4.while.end:
+    successors: %bb.8(0x30000000), %bb.5(0x50000000)
+    liveins: $q0, $r0, $r1, $r2
+
+    renamable $s4 = VLDRS %const.0, 0, 14 /* CC::al */, $noreg :: (load 4 from constant-pool)
+    renamable $r1 = t2ANDri killed renamable $r1, 3, 14 /* CC::al */, $noreg, def $cpsr
+    $r12 = VMOVRS killed $s4, 14 /* CC::al */, $noreg
+    renamable $r12 = MVE_VMAXNMVf32 killed renamable $r12, killed renamable $q0, 0, $noreg
+    tBcc %bb.8, 0 /* CC::eq */, killed $cpsr
+
+  bb.5.vector.ph:
+    successors: %bb.6(0x80000000)
+    liveins: $r0, $r1, $r2, $r12
+
+    renamable $r3, dead $cpsr = nuw nsw tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+    renamable $q0 = MVE_VDUP32 killed renamable $r12, 0, $noreg, undef renamable $q0
+    renamable $r3 = t2ANDri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+    renamable $lr = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+    renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+    t2DoLoopStart renamable $lr
+
+  bb.6.vector.body (align 4):
+    successors: %bb.6(0x7c000000), %bb.7(0x04000000)
+    liveins: $lr, $q0, $r0, $r1, $r2
+
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg
+    renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+    MVE_VPST 8, implicit $vpr
+    renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv13, align 4)
+    MVE_VPST 8, implicit $vpr
+    renamable $vpr = MVE_VCMPf32 renamable $q1, renamable $q0, 12, 1, killed renamable $vpr
+    MVE_VPST 8, implicit $vpr
+    renamable $q0 = MVE_VORR killed renamable $q1, renamable $q1, 1, killed renamable $vpr, killed renamable $q0
+    t2LoopEnd renamable $lr, %bb.6, implicit-def dead $cpsr
+    tB %bb.7, 14 /* CC::al */, $noreg
+
+  bb.7.middle.block:
+    liveins: $q0, $r2
+
+    renamable $s4 = nnan VFP_VMAXNMS renamable $s2, renamable $s3
+    renamable $s0 = nnan VFP_VMAXNMS killed renamable $s0, killed renamable $s1, implicit $q0
+    renamable $s0 = nnan VFP_VMAXNMS killed renamable $s0, killed renamable $s4
+    VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.pResult)
+    frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+
+  bb.8:
+    liveins: $r2, $r12
+
+    $s0 = VMOVSR killed $r12, 14 /* CC::al */, $noreg
+    VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.pResult)
+    frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+
+  bb.9 (align 4):
+    CONSTPOOL_ENTRY 0, %const.0, 4
+
+...
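
A condensed before/after view of the cross-block combination exercised by
vector.body above; both snippets are quoted verbatim from the test (the input
body and its autogenerated CHECK lines) and are illustration only, not part of
the patch.

Input, where the VCMP closes one VPST block and the VORR sits behind a
separate VPST:

    MVE_VPST 8, implicit $vpr
    renamable $vpr = MVE_VCMPf32 renamable $q1, renamable $q0, 12, 1, killed renamable $vpr
    MVE_VPST 8, implicit $vpr
    renamable $q0 = MVE_VORR killed renamable $q1, renamable $q1, 1, killed renamable $vpr, killed renamable $q0

Output, after the VCTPs are removed and the dangling VCMP is folded into the
following block's VPST, leaving a single VPT:

    MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
    renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, killed renamable $q0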