Index: llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -61,6 +61,8 @@
 
 namespace {
 
+  using InstSet = SmallPtrSetImpl<MachineInstr *>;
+
   class PostOrderLoopTraversal {
     MachineLoop &ML;
     MachineLoopInfo &MLI;
@@ -518,6 +520,59 @@
   return MO.isReg() && MO.getReg() && Class->contains(MO.getReg());
 }
 
+// Can this instruction generate a non-zero result when given only zeroed
+// operands? This allows us to know that, given operands with false bytes
+// zeroed by masked loads, the result will also contain zeros in those
+// bytes.
+static bool canGenerateNonZeros(const MachineInstr &MI) {
+  switch (MI.getOpcode()) {
+  default:
+    break;
+  // FIXME: FP minus 0?
+  //case ARM::MVE_VNEGf16:
+  //case ARM::MVE_VNEGf32:
+  case ARM::MVE_VMVN:
+  case ARM::MVE_VORN:
+  case ARM::MVE_VCLZs8:
+  case ARM::MVE_VCLZs16:
+  case ARM::MVE_VCLZs32:
+    return true;
+  }
+  return false;
+}
+
+// MVE 'narrowing' instructions operate on half a lane, reading from half and
+// writing to half, which are referred to as the top and bottom half. The
+// other half retains its previous value.
+static bool retainsPreviousHalf(const MachineInstr &MI) {
+  const MCInstrDesc &MCID = MI.getDesc();
+  uint64_t Flags = MCID.TSFlags;
+  return (Flags & ARMII::RetainsPreviousHalf) != 0;
+}
+
+// Look at the register uses of MI to see if it can only receive zeros
+// into its false lanes, which would then produce zeros. Also check that
+// the output register is also defined by a FalseLaneZeros instruction,
+// so that if tail-predication happens, the lanes that aren't updated will
+// still be zeros.
+static bool producesFalseLaneZeros(MachineInstr &MI,
+                                   const TargetRegisterClass *QPRs,
+                                   const ReachingDefAnalysis &RDA,
+                                   InstSet &FalseLaneZeros) {
+  if (canGenerateNonZeros(MI))
+    return false;
+  for (auto &MO : MI.operands()) {
+    if (!MO.isReg() || !MO.getReg())
+      continue;
+    if (auto *OpDef = RDA.getMIOperand(&MI, MO))
+      if (FalseLaneZeros.count(OpDef))
+        continue;
+    return false;
+  }
+  LLVM_DEBUG(dbgs() << "ARM Loops: Always False Zeros: " << MI);
+  return true;
+}
+
 bool LowOverheadLoop::ValidateLiveOuts() const {
   // We want to find out if the tail-predicated version of this loop will
   // produce the same values as the loop in its original form. For this to
@@ -538,12 +593,14 @@
   // operands, or stored results are equivalent already. Other explicitly
   // predicated instructions will perform the same operation in the original
   // loop and the tail-predicated form too. Because of this, we can insert
-  // loads, stores and other predicated instructions into our KnownFalseZeros
+  // loads, stores and other predicated instructions into our Predicated
   // set and build from there.
   const TargetRegisterClass *QPRs = TRI.getRegClass(ARM::MQPRRegClassID);
-  SetVector<MachineInstr *> UnknownFalseLanes;
-  SmallPtrSet<MachineInstr *, 4> KnownFalseZeros;
+  SetVector<MachineInstr *> Unknown;
+  SmallPtrSet<MachineInstr *, 4> FalseLaneZeros;
+  SmallPtrSet<MachineInstr *, 4> Predicated;
   MachineBasicBlock *MBB = ML.getHeader();
+
   for (auto &MI : *MBB) {
     const MCInstrDesc &MCID = MI.getDesc();
     uint64_t Flags = MCID.TSFlags;
@@ -551,63 +608,49 @@
       continue;
 
     if (isVectorPredicated(&MI)) {
-      KnownFalseZeros.insert(&MI);
+      if (MI.mayLoad())
+        FalseLaneZeros.insert(&MI);
+      Predicated.insert(&MI);
       continue;
     }
 
     if (MI.getNumDefs() == 0)
       continue;
 
-    // Only evaluate instructions which produce a single value.
-    assert((MI.getNumDefs() == 1 && MI.defs().begin()->isReg()) &&
-           "Expected no more than one register def");
-
-    Register DefReg = MI.defs().begin()->getReg();
-    for (auto &MO : MI.operands()) {
-      if (!isRegInClass(MO, QPRs) || !MO.isUse() || MO.getReg() != DefReg)
-        continue;
-
-      // If this instruction overwrites one of its operands, and that register
-      // has known lanes, then this instruction also has known predicated false
-      // lanes.
-      if (auto *OpDef = RDA.getMIOperand(&MI, MO)) {
-        if (KnownFalseZeros.count(OpDef)) {
-          KnownFalseZeros.insert(&MI);
-          break;
-        }
-      }
-    }
-    if (!KnownFalseZeros.count(&MI))
-      UnknownFalseLanes.insert(&MI);
+    if (producesFalseLaneZeros(MI, QPRs, RDA, FalseLaneZeros))
+      FalseLaneZeros.insert(&MI);
+    else if (retainsPreviousHalf(MI))
+      return false;
+    else
+      Unknown.insert(&MI);
   }
 
-  auto HasKnownUsers = [this](MachineInstr *MI, const MachineOperand &MO,
-                              SmallPtrSetImpl<MachineInstr *> &Knowns) {
+  auto HasPredicatedUsers = [this](MachineInstr *MI, const MachineOperand &MO,
+                                   SmallPtrSetImpl<MachineInstr *> &Predicated) {
    SmallPtrSet<MachineInstr *, 2> Uses;
    RDA.getGlobalUses(MI, MO.getReg(), Uses);
    for (auto *Use : Uses) {
-      if (Use != MI && !Knowns.count(Use))
+      if (Use != MI && !Predicated.count(Use))
        return false;
    }
    return true;
  };
 
-  // Now for all the unknown values, see if they're only consumed by known
-  // instructions. Visit in reverse so that we can start at the values being
+  // Visit the unknowns in reverse so that we can start at the values being
   // stored and then we can work towards the leaves, hopefully adding more
-  // instructions to KnownFalseZeros.
-  for (auto *MI : reverse(UnknownFalseLanes)) {
+  // instructions to Predicated.
+  for (auto *MI : reverse(Unknown)) {
    for (auto &MO : MI->operands()) {
      if (!isRegInClass(MO, QPRs) || !MO.isDef())
        continue;
-      if (!HasKnownUsers(MI, MO, KnownFalseZeros)) {
+      if (!HasPredicatedUsers(MI, MO, Predicated)) {
        LLVM_DEBUG(dbgs() << "ARM Loops: Found an unknown def of : "
                          << TRI.getRegAsmName(MO.getReg()) << " at " << *MI);
        return false;
      }
    }
    // Any unknown false lanes have been masked away by the user(s).
-    KnownFalseZeros.insert(MI);
+    Predicated.insert(MI);
  }
 
   // Collect Q-regs that are live in the exit blocks.
We don't collect scalars Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir @@ -0,0 +1,330 @@ +# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s + +# CHECK-NOT: LETP + +--- | + define arm_aapcs_vfpcc void @test_ctlz_i8(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c, i32 %elts, i32 %iters) #0 { + entry: + %cmp = icmp slt i32 %elts, 1 + br i1 %cmp, label %exit, label %loop.ph + + loop.ph: ; preds = %entry + call void @llvm.set.loop.iterations.i32(i32 %iters) + br label %loop.body + + loop.body: ; preds = %loop.body, %loop.ph + %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %iters, %loop.ph ] + %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ] + %addr.a = phi <8 x i16>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ] + %addr.b = phi <8 x i16>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ] + %addr.c = phi <8 x i16>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ] + %pred = call <8 x i1> @llvm.arm.mve.vctp16(i32 %count) + %elts.rem = sub i32 %count, 8 + %masked.load.a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.a, i32 2, <8 x i1> %pred, <8 x i16> undef) + %masked.load.b = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.b, i32 2, <8 x i1> %pred, <8 x i16> undef) + %bitcast.a = bitcast <8 x i16> %masked.load.a to <16 x i8> + %ctlz = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %bitcast.a, i1 false) + %shrn = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> %ctlz, <8 x i16> %masked.load.b, i32 1, i32 1, i32 0, i32 1, i32 0, i32 1) + %bitcast = bitcast <16 x i8> %shrn to <8 x i16> + call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %bitcast, <8 x i16>* %addr.c, i32 2, <8 x i1> %pred) + %addr.a.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1 + %addr.b.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1 + %addr.c.next = getelementptr <8 x i16>, <8 x i16>* %addr.c, i32 1 + %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1) + %end = icmp ne i32 %loop.dec, 0 + %lsr.iv.next = add i32 %lsr.iv, -1 + br i1 %end, label %loop.body, label %exit + + exit: ; preds = %loop.body, %entry + ret void + } + + define arm_aapcs_vfpcc void @test_ctlz_i16(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 { + entry: + %cmp = icmp slt i32 %elts, 1 + br i1 %cmp, label %exit, label %loop.ph + + loop.ph: ; preds = %entry + call void @llvm.set.loop.iterations.i32(i32 %iters) + br label %loop.body + + loop.body: ; preds = %loop.body, %loop.ph + %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %iters, %loop.ph ] + %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ] + %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ] + %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ] + %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ] + %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count) + %elts.rem = sub i32 %count, 4 + %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef) + %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef) + %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16> + %ctlz = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %bitcast.a, i1 false) + %shrn = call <8 x i16> 
@llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %ctlz, <4 x i32> %masked.load.b, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1) + %bitcast = bitcast <8 x i16> %shrn to <4 x i32> + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred) + %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1 + %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1 + %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1 + %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1) + %end = icmp ne i32 %loop.dec, 0 + %lsr.iv.next = add i32 %lsr.iv, -1 + br i1 %end, label %loop.body, label %exit + + exit: ; preds = %loop.body, %entry + ret void + } + + define arm_aapcs_vfpcc void @test_ctlz_i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 { + entry: + %cmp = icmp slt i32 %elts, 1 + br i1 %cmp, label %exit, label %loop.ph + + loop.ph: ; preds = %entry + call void @llvm.set.loop.iterations.i32(i32 %iters) + br label %loop.body + + loop.body: ; preds = %loop.body, %loop.ph + %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %iters, %loop.ph ] + %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ] + %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ] + %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ] + %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ] + %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count) + %elts.rem = sub i32 %count, 4 + %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef) + %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef) + %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %masked.load.b, i1 false) + %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16> + %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %ctlz, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1) + %bitcast = bitcast <8 x i16> %shrn to <4 x i32> + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred) + %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1 + %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1 + %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1 + %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1) + %end = icmp ne i32 %loop.dec, 0 + %lsr.iv.next = add i32 %lsr.iv, -1 + br i1 %end, label %loop.body, label %exit + + exit: ; preds = %loop.body, %entry + ret void + } + + declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1 immarg) + declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1 immarg) + declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1 immarg) + declare void @llvm.set.loop.iterations.i32(i32) + declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) + declare <4 x i1> @llvm.arm.mve.vctp32(i32) + declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) + declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) + declare <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32, i32, i32, i32) + declare <8 x i1> @llvm.arm.mve.vctp16(i32) + declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>) + declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>) + declare <16 x i8> 
@llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8>, <8 x i16>, i32, i32, i32, i32, i32, i32) + +... +--- +name: test_ctlz_i8 +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } + - { reg: '$r3', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r3, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r4, -8 + tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 11, 8, implicit-def $itstate + tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + renamable $r12 = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + t2DoLoopStart renamable $r12 + $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg + + bb.1.loop.body: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $r0, $r1, $r2, $r3, $r4 + + renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg + MVE_VPST 4, implicit $vpr + renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 2) + renamable $q1 = MVE_VLDRHU16 killed renamable $r0, 0, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 2) + $lr = tMOVr $r4, 14 /* CC::al */, $noreg + renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg + renamable $q1 = MVE_VCLZs8 killed renamable $q1, 0, $noreg, undef renamable $q1 + renamable $lr = t2LoopDec killed renamable $lr, 1 + $r0 = tMOVr $r1, 14 /* CC::al */, $noreg + renamable $q1 = MVE_VQSHRUNs16th killed renamable $q1, killed renamable $q0, 1, 0, $noreg + MVE_VPST 8, implicit $vpr + renamable $r2 = MVE_VSTRHU16_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 2) + t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2.exit: + tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + +... 
+--- +name: test_ctlz_i16 +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } + - { reg: '$r3', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r3, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r4, -8 + tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 11, 8, implicit-def $itstate + tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + t2DoLoopStart renamable $r4 + $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + + bb.1.loop.body: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $r0, $r1, $r2, $r3, $r12 + + renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + $lr = tMOVr $r12, 14 /* CC::al */, $noreg + MVE_VPST 4, implicit $vpr + renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 4) + renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 4) + renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + renamable $q1 = MVE_VCLZs16 killed renamable $q1, 0, $noreg, undef renamable $q1 + renamable $lr = t2LoopDec killed renamable $lr, 1 + renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg + MVE_VPST 8, implicit $vpr + renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 4) + t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2.exit: + tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + +... 
+--- +name: test_ctlz_i32 +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } + - { reg: '$r3', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r3, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r4, -8 + tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 11, 8, implicit-def $itstate + tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + t2DoLoopStart renamable $r4 + $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + + bb.1.loop.body: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $r0, $r1, $r2, $r3, $r12 + + renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + $lr = tMOVr $r12, 14 /* CC::al */, $noreg + MVE_VPST 4, implicit $vpr + renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 4) + renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 4) + renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + renamable $q1 = MVE_VCLZs32 killed renamable $q1, 0, $noreg, undef renamable $q1 + renamable $lr = t2LoopDec killed renamable $lr, 1 + renamable $q0 = MVE_VQSHRUNs32th killed renamable $q0, killed renamable $q1, 3, 0, $noreg + MVE_VPST 8, implicit $vpr + renamable $r2 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 4) + t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2.exit: + tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + +... 
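The three ctlz loops above are left alone by the pass (hence the CHECK-NOT: LETP at the top of the file): VCLZ is listed in canGenerateNonZeros because a lane that was zeroed by a predicated load does not stay zero once it goes through a count-leading-zeros operation. A minimal standalone C++ model of that property, illustrative only and not part of the patch (clz32 is a hypothetical helper, not an LLVM or MVE API):

#include <cassert>
#include <cstdint>

// Count leading zeros of a 32-bit value, with clz(0) defined as 32, matching
// the MVE VCLZ semantics the tests above rely on.
static unsigned clz32(uint32_t X) {
  unsigned Count = 0;
  for (uint32_t Mask = 0x80000000u; Mask != 0 && (X & Mask) == 0; Mask >>= 1)
    ++Count;
  return Count;
}

int main() {
  assert(clz32(0) == 32);          // a zeroed false lane becomes 32, not 0
  assert(clz32(0x00ffffffu) == 8); // only genuinely leading zeros are counted
  return 0;
}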
Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir @@ -0,0 +1,273 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s + +--- | + define arm_aapcs_vfpcc void @test_vqrshruntq_n_s32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) { + entry: + %cmp = icmp slt i32 %elts, 1 + br i1 %cmp, label %exit, label %loop.ph + + loop.ph: ; preds = %entry + call void @llvm.set.loop.iterations.i32(i32 %iters) + br label %loop.body + + loop.body: ; preds = %loop.body, %loop.ph + %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %iters, %loop.ph ] + %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ] + %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ] + %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ] + %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ] + %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count) + %elts.rem = sub i32 %count, 4 + %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef) + %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef) + %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16> + %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %masked.load.b, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1) + %bitcast = bitcast <8 x i16> %shrn to <4 x i32> + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred) + %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1 + %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1 + %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1 + %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1) + %end = icmp ne i32 %loop.dec, 0 + %lsr.iv.next = add i32 %lsr.iv, -1 + br i1 %end, label %loop.body, label %exit + + exit: ; preds = %loop.body, %entry + ret void + } + + define arm_aapcs_vfpcc void @test_vqrshruntq_n_s16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c, i32 %elts, i32 %iters) { + entry: + %cmp = icmp slt i32 %elts, 1 + br i1 %cmp, label %exit, label %loop.ph + + loop.ph: ; preds = %entry + call void @llvm.set.loop.iterations.i32(i32 %iters) + br label %loop.body + + loop.body: ; preds = %loop.body, %loop.ph + %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %iters, %loop.ph ] + %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ] + %addr.a = phi <8 x i16>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ] + %addr.b = phi <8 x i16>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ] + %addr.c = phi <8 x i16>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ] + %pred = call <8 x i1> @llvm.arm.mve.vctp16(i32 %count) + %elts.rem = sub i32 %count, 8 + %masked.load.a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.a, i32 2, <8 x i1> %pred, <8 x i16> undef) + %masked.load.b = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.b, i32 2, <8 x i1> %pred, <8 x i16> undef) + %bitcast.a = bitcast <8 x i16> %masked.load.a to <16 x i8> + %shrn = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> %bitcast.a, <8 x i16> %masked.load.b, i32 1, i32 1, i32 0, i32 1, 
i32 0, i32 1) + %bitcast = bitcast <16 x i8> %shrn to <8 x i16> + call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %bitcast, <8 x i16>* %addr.c, i32 2, <8 x i1> %pred) + %addr.a.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1 + %addr.b.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1 + %addr.c.next = getelementptr <8 x i16>, <8 x i16>* %addr.c, i32 1 + %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1) + %end = icmp ne i32 %loop.dec, 0 + %lsr.iv.next = add i32 %lsr.iv, -1 + br i1 %end, label %loop.body, label %exit + + exit: ; preds = %loop.body, %entry + ret void + } + + declare void @llvm.set.loop.iterations.i32(i32) + declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) + declare <4 x i1> @llvm.arm.mve.vctp32(i32) + declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) + declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) + declare <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32, i32, i32, i32) + declare <8 x i1> @llvm.arm.mve.vctp16(i32) + declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>) + declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>) + declare <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8>, <8 x i16>, i32, i32, i32, i32, i32, i32) + +... +--- +name: test_vqrshruntq_n_s32 +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } + - { reg: '$r3', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 + restorePoint: '' +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + ; CHECK-LABEL: name: test_vqrshruntq_n_s32 + ; CHECK: bb.0.entry: + ; CHECK: successors: %bb.1(0x80000000) + ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4 + ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8 + ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4 + ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8 + ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + ; CHECK: t2IT 11, 8, implicit-def $itstate + ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + ; CHECK: dead $lr = MVE_DLSTP_32 killed renamable $r3 + ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + ; CHECK: bb.1.loop.body: + ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK: liveins: $r0, 
$r1, $r2, $r12 + ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg :: (load 16 from %ir.addr.b, align 4) + ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg :: (load 16 from %ir.addr.a, align 4) + ; CHECK: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg + ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 0, killed $noreg :: (store 16 into %ir.addr.c, align 4) + ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.1 + ; CHECK: bb.2.exit: + ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r3, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r4, -8 + tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 11, 8, implicit-def $itstate + tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + t2DoLoopStart renamable $r4 + $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + + bb.1.loop.body: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $r0, $r1, $r2, $r3, $r12 + + renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + $lr = tMOVr $r12, 14 /* CC::al */, $noreg + renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + MVE_VPST 4, implicit $vpr + renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 4) + renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 4) + renamable $lr = t2LoopDec killed renamable $lr, 1 + renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg + MVE_VPST 8, implicit $vpr + renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 4) + t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2.exit: + tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + +... 
+--- +name: test_vqrshruntq_n_s16 +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } + - { reg: '$r3', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + ; CHECK-LABEL: name: test_vqrshruntq_n_s16 + ; CHECK: bb.0.entry: + ; CHECK: successors: %bb.1(0x80000000) + ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4 + ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8 + ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4 + ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8 + ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + ; CHECK: t2IT 11, 8, implicit-def $itstate + ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + ; CHECK: renamable $r12 = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + ; CHECK: dead $lr = MVE_DLSTP_16 killed renamable $r3 + ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: bb.1.loop.body: + ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK: liveins: $r0, $r1, $r2, $r4 + ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg + ; CHECK: renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg + ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 0, $noreg :: (load 16 from %ir.addr.b, align 2) + ; CHECK: renamable $q1 = MVE_VLDRHU16 killed renamable $r0, 0, 0, $noreg :: (load 16 from %ir.addr.a, align 2) + ; CHECK: $r0 = tMOVr $r1, 14 /* CC::al */, $noreg + ; CHECK: renamable $q1 = MVE_VQSHRUNs16th killed renamable $q1, killed renamable $q0, 1, 0, $noreg + ; CHECK: renamable $r2 = MVE_VSTRHU16_post killed renamable $q1, killed renamable $r2, 16, 0, killed $noreg :: (store 16 into %ir.addr.c, align 2) + ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.1 + ; CHECK: bb.2.exit: + ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r3, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r4, -8 + tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 11, 8, implicit-def $itstate + tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + renamable $r12 = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load 4 from 
%fixed-stack.0, align 8)
+    t2DoLoopStart renamable $r12
+    $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+
+  bb.1.loop.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $r0, $r1, $r2, $r3, $r4
+
+    renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg
+    $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+    renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg
+    MVE_VPST 4, implicit $vpr
+    renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 2)
+    renamable $q1 = MVE_VLDRHU16 killed renamable $r0, 0, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 2)
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    $r0 = tMOVr $r1, 14 /* CC::al */, $noreg
+    renamable $q1 = MVE_VQSHRUNs16th killed renamable $q1, killed renamable $q0, 1, 0, $noreg
+    MVE_VPST 8, implicit $vpr
+    renamable $r2 = MVE_VSTRHU16_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 2)
+    t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14 /* CC::al */, $noreg
+
+  bb.2.exit:
+    tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+
+...
Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
@@ -0,0 +1,281 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+
+--- |
+  define arm_aapcs_vfpcc void @test_vmvn(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 {
+  entry:
+    %cmp = icmp slt i32 %elts, 1
+    br i1 %cmp, label %exit, label %loop.ph
+
+  loop.ph:                                          ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %iters)
+    br label %loop.body
+
+  loop.body:                                        ; preds = %loop.body, %loop.ph
+    %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %iters, %loop.ph ]
+    %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
+    %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count)
+    %elts.rem = sub i32 %count, 4
+    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %not = xor <4 x i32> %masked.load.b, <i32 -1, i32 -1, i32 -1, i32 -1>
+    %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16>
+    %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %not, i32 15, i32 1, i32 0, i32 0, i32 0, i32 0)
+    %bitcast = bitcast <8 x i16> %shrn to <4 x i32>
+    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred)
+    %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1
+    %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1
+    %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1
+    %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
+    %end = icmp ne i32 %loop.dec, 0
+    %lsr.iv.next = add i32 %lsr.iv, -1
+    br i1 %end, label %loop.body, label %exit
+
+  exit:                                             ; preds = %loop.body, %entry
+    ret void
+  }
+
+  define arm_aapcs_vfpcc void @test_vorn(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 {
+  entry:
+    %cmp = icmp slt i32 %elts, 1
+    br i1 %cmp, label %exit, label %loop.ph
+
+  loop.ph:                                          ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %iters)
+    br label %loop.body
+
+  loop.body:                                        ; preds = %loop.body, %loop.ph
+    %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %iters, %loop.ph ]
+    %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
+    %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count)
+    %elts.rem = sub i32 %count, 4
+    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %not = xor <4 x i32> %masked.load.b, <i32 -1, i32 -1, i32 -1, i32 -1>
+    %or = or <4 x i32> %not, %masked.load.a
+    %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16>
+    %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %or, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1)
+    %bitcast = bitcast <8 x i16> %shrn to <4 x i32>
+    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred)
+    %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1
+    %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1
+    %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1
+    %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
+    %end = icmp ne i32 %loop.dec, 0
+    %lsr.iv.next = add i32 %lsr.iv, -1
+    br i1 %end, label %loop.body, label %exit
+
+  exit:                                             ; preds = %loop.body, %entry
+    ret void
+  }
+
+  declare void @llvm.set.loop.iterations.i32(i32)
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+  declare <4 x i1> @llvm.arm.mve.vctp32(i32)
+  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+  declare <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32, i32, i32, i32)
+
+...
+--- +name: test_vmvn +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } + - { reg: '$r3', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + ; CHECK-LABEL: name: test_vmvn + ; CHECK: bb.0.entry: + ; CHECK: successors: %bb.1(0x80000000) + ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4 + ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8 + ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4 + ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8 + ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + ; CHECK: t2IT 11, 8, implicit-def $itstate + ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + ; CHECK: dead $lr = t2DLS renamable $r4 + ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + ; CHECK: bb.1.loop.body: + ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12 + ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: MVE_VPST 4, implicit $vpr + ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 4) + ; CHECK: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 4) + ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + ; CHECK: renamable $q1 = MVE_VMVN killed renamable $q1, 0, $noreg, undef renamable $q1 + ; CHECK: renamable $q0 = MVE_VQSHRNbhs32 killed renamable $q0, killed renamable $q1, 15, 0, $noreg + ; CHECK: MVE_VPST 8, implicit $vpr + ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 4) + ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.1 + ; CHECK: bb.2.exit: + ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r3, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + 
frame-setup CFI_INSTRUCTION offset $r4, -8 + tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 11, 8, implicit-def $itstate + frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + t2DoLoopStart renamable $r4 + $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + + bb.1.loop.body: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $r0, $r1, $r2, $r3, $r12 + + renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + $lr = tMOVr $r12, 14 /* CC::al */, $noreg + MVE_VPST 4, implicit $vpr + renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 4) + renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 4) + renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + renamable $q1 = MVE_VMVN killed renamable $q1, 0, $noreg, undef renamable $q1 + renamable $lr = t2LoopDec killed renamable $lr, 1 + renamable $q0 = MVE_VQSHRNbhs32 killed renamable $q0, killed renamable $q1, 15, 0, $noreg + MVE_VPST 8, implicit $vpr + renamable $r2 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 4) + t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2.exit: + frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + +... +--- +name: test_vorn +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } + - { reg: '$r3', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 8, stack-id: default, + isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + ; CHECK-LABEL: name: test_vorn + ; CHECK: bb.0.entry: + ; CHECK: successors: %bb.1(0x80000000) + ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4 + ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8 + ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4 + ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8 + ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + ; CHECK: t2IT 11, 8, implicit-def $itstate + ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + 
; CHECK: dead $lr = t2DLS renamable $r4 + ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + ; CHECK: bb.1.loop.body: + ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12 + ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + ; CHECK: MVE_VPST 4, implicit $vpr + ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 4) + ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 4) + ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + ; CHECK: renamable $q0 = MVE_VORN renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0 + ; CHECK: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg + ; CHECK: MVE_VPST 8, implicit $vpr + ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 4) + ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.1 + ; CHECK: bb.2.exit: + ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r3, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r4, -8 + tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 11, 8, implicit-def $itstate + frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate + renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) + t2DoLoopStart renamable $r4 + $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + + bb.1.loop.body: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $r0, $r1, $r2, $r3, $r12 + + renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + MVE_VPST 4, implicit $vpr + renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.addr.b, align 4) + renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.addr.a, align 4) + $lr = tMOVr $r12, 14 /* CC::al */, $noreg + renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + renamable $q0 = MVE_VORN renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0 + renamable $lr = t2LoopDec killed renamable $lr, 1 + renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg + MVE_VPST 8, implicit $vpr + renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr :: (store 16 into %ir.addr.c, align 4) + t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2.exit: + frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + +...
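A short note on what the two retaining tests distinguish: the VQSHRN/VQSHRUN 'top' forms write one half of each destination lane and retain the other half, so ValidateLiveOuts has to prove that the values feeding them still carry zeros in the predicated-false lanes. In safe-retaining.mir both inputs come straight from masked loads and the loops are converted to DLSTP/LETP form; in unsafe-retaining.mir one input has passed through VMVN or VORN, which turn zeroed lanes into non-zero values, so the loops keep their plain DLS/LE form. A small scalar sketch of the retained-half behaviour, illustrative only and not part of the patch (narrowTopHalf is a hypothetical helper):

#include <cassert>
#include <cstdint>

// Model one 32-bit vector lane: a narrowing 'top' operation writes the top
// 16 bits and retains the previous bottom 16 bits of the destination.
static uint32_t narrowTopHalf(uint32_t PrevDst, uint16_t Narrowed) {
  return (uint32_t(Narrowed) << 16) | (PrevDst & 0xffffu);
}

int main() {
  // Retained half is zero (a lane zeroed by a masked load): the result depends
  // only on the narrowed value, so tail-predication preserves it.
  assert(narrowTopHalf(0x00000000u, 0x1234) == 0x12340000u);
  // Retained half is non-zero (e.g. VMVN applied to a zeroed lane): the
  // live-out differs from what the unpredicated loop would have produced.
  assert(narrowTopHalf(0x0000ffffu, 0x1234) != 0x12340000u);
  return 0;
}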