Index: llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -1083,6 +1083,24 @@
     if (RDA.isReachingDefLiveOut(Start, CountReg) &&
         RDA.isReachingDefLiveOut(Start, ARM::LR))
       InsertPt = FirstNonTerminator;
+    else {
+      // Try moving the loop start to the end of the block, even if the count
+      // or LR are live-out. Only possible if their values are the same there.
+      auto *LastMI = &*MBB->getLastNonDebugInstr();
+      if (RDA.hasSameReachingDef(LastMI, &*InsertPt, CountReg) &&
+          RDA.hasSameReachingDef(LastMI, &*InsertPt, ARM::LR)) {
+        bool DefinesCountOrLR = false;
+        for (const MachineOperand &Op : LastMI->operands()) {
+          if (Op.isReg() && Op.isDef() &&
+              (Op.getReg() == ARM::LR || Op.getReg() == CountReg)) {
+            DefinesCountOrLR = true;
+            break;
+          }
+        }
+        if (!DefinesCountOrLR)
+          InsertPt = FirstNonTerminator;
+      }
+    }
   } else if (RDA.hasSameReachingDef(Start, &*FirstNonTerminator, CountReg) &&
              RDA.hasSameReachingDef(Start, &*FirstNonTerminator, ARM::LR))
     InsertPt = FirstNonTerminator;
Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
===================================================================
--- llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
+++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
@@ -171,8 +171,8 @@
     ; CHECK: successors: %bb.2(0x80000000)
     ; CHECK: liveins: $r0, $r1, $r2, $r3
     ; CHECK: renamable $r12 = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8)
-    ; CHECK: dead $lr = t2DLS renamable $r12
-    ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+    ; CHECK: $r4 = tMOVr $r12, 14 /* CC::al */, $noreg
+    ; CHECK: dead $lr = t2DLS killed renamable $r12
     ; CHECK: bb.2.loop.body:
     ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
     ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4
@@ -279,8 +279,8 @@
     ; CHECK: successors: %bb.2(0x80000000)
     ; CHECK: liveins: $r0, $r1, $r2, $r3
     ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8)
-    ; CHECK: dead $lr = t2DLS renamable $r4
-    ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
+    ; CHECK: $r12 = tMOVr $r4, 14 /* CC::al */, $noreg
+    ; CHECK: dead $lr = t2DLS killed renamable $r4
     ; CHECK: bb.2.loop.body:
     ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
     ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
@@ -385,8 +385,8 @@
     ; CHECK: successors: %bb.2(0x80000000)
     ; CHECK: liveins: $r0, $r1, $r2, $r3
     ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8)
-    ; CHECK: dead $lr = t2DLS renamable $r4
-    ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
+    ; CHECK: $r12 = tMOVr $r4, 14 /* CC::al */, $noreg
+    ; CHECK: dead $lr = t2DLS killed renamable $r4
     ; CHECK: bb.2.loop.body:
     ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
     ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/dls-kills-reg.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/dls-kills-reg.mir
@@ -0,0 +1,226 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+
+--- |
+  define arm_aapcs_vfpcc void @do_loop_start_kills_reg(float* %pSrc, i32 %blockSize, float* 
nocapture %pResult) { + entry: + %0 = add i32 %blockSize, 3 + %1 = icmp slt i32 %blockSize, 4 + %smin = select i1 %1, i32 %blockSize, i32 4 + %2 = sub i32 %0, %smin + %3 = lshr i32 %2, 2 + %4 = add nuw nsw i32 %3, 1 + %5 = icmp slt i32 %blockSize, 4 + %smin3 = select i1 %5, i32 %blockSize, i32 4 + %6 = sub i32 %0, %smin3 + %7 = lshr i32 %6, 2 + %8 = add nuw nsw i32 %7, 1 + call void @llvm.set.loop.iterations.i32(i32 %8) + br label %do.body.i + + do.body.i: ; preds = %do.body.i, %entry + %blkCnt.0.i = phi i32 [ %13, %do.body.i ], [ %blockSize, %entry ] + %sumVec.0.i = phi <4 x float> [ %12, %do.body.i ], [ zeroinitializer, %entry ] + %pSrc.addr.0.i = phi float* [ %add.ptr.i, %do.body.i ], [ %pSrc, %entry ] + %9 = phi i32 [ %8, %entry ], [ %14, %do.body.i ] + %pSrc.addr.0.i2 = bitcast float* %pSrc.addr.0.i to <4 x float>* + %10 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0.i) + %11 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %pSrc.addr.0.i2, i32 4, <4 x i1> %10, <4 x float> zeroinitializer) + %12 = tail call fast <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float> %sumVec.0.i, <4 x float> %11, <4 x i1> %10, <4 x float> %sumVec.0.i) + %add.ptr.i = getelementptr inbounds float, float* %pSrc.addr.0.i, i32 4 + %13 = add i32 %blkCnt.0.i, -4 + %14 = call i32 @llvm.loop.decrement.reg.i32(i32 %9, i32 1) + %15 = icmp ne i32 %14, 0 + br i1 %15, label %do.body.i, label %arm_mean_f32_mve.exit + + arm_mean_f32_mve.exit: ; preds = %do.body.i + %16 = extractelement <4 x float> %12, i32 3 + %add2.i.i = fadd fast float %16, %16 + %conv.i = uitofp i32 %blockSize to float + %div.i = fdiv fast float %add2.i.i, %conv.i + %17 = bitcast float %div.i to i32 + call void @llvm.set.loop.iterations.i32(i32 %4) + br label %do.body + + do.body: ; preds = %do.body, %arm_mean_f32_mve.exit + %blkCnt.0 = phi i32 [ %blockSize, %arm_mean_f32_mve.exit ], [ %26, %do.body ] + %sumVec.0 = phi <4 x float> [ zeroinitializer, %arm_mean_f32_mve.exit ], [ %25, %do.body ] + %pSrc.addr.0 = phi float* [ %pSrc, %arm_mean_f32_mve.exit ], [ %add.ptr, %do.body ] + %18 = phi i32 [ %4, %arm_mean_f32_mve.exit ], [ %27, %do.body ] + %pSrc.addr.01 = bitcast float* %pSrc.addr.0 to <4 x float>* + %19 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0) + %20 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %pSrc.addr.01, i32 4, <4 x i1> %19, <4 x float> zeroinitializer) + %21 = insertelement <4 x i32> undef, i32 %17, i64 0 + %22 = shufflevector <4 x i32> %21, <4 x i32> undef, <4 x i32> zeroinitializer + %23 = bitcast <4 x i32> %22 to <4 x float> + %24 = tail call fast <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float> %20, <4 x float> %23, <4 x i1> %19, <4 x float> undef) + %25 = tail call fast <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> %24, <4 x float> %24, <4 x float> %sumVec.0, <4 x i1> %19) + %add.ptr = getelementptr inbounds float, float* %pSrc.addr.0, i32 4 + %26 = add i32 %blkCnt.0, -4 + %27 = call i32 @llvm.loop.decrement.reg.i32(i32 %18, i32 1) + %28 = icmp ne i32 %27, 0 + br i1 %28, label %do.body, label %do.end + + do.end: ; preds = %do.body + %29 = extractelement <4 x float> %25, i32 3 + %add2.i = fadd fast float %29, %29 + %sub2 = add i32 %blockSize, -1 + %conv = uitofp i32 %sub2 to float + %div = fdiv fast float %add2.i, %conv + store float %div, float* %pResult, align 4 + ret void + } + + declare <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) + declare <4 x float> 
@llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x float>, <4 x i1>) + declare <4 x i1> @llvm.arm.mve.vctp32(i32) + declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>) + declare <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) + declare void @llvm.set.loop.iterations.i32(i32) + declare i32 @llvm.loop.decrement.reg.i32(i32, i32) +... +--- +name: do_loop_start_kills_reg +alignment: 2 +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } +frameInfo: + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 +fixedStack: [] +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +callSites: [] +constants: [] +machineFunctionInfo: {} +body: | + ; CHECK-LABEL: name: do_loop_start_kills_reg + ; CHECK: bb.0.entry: + ; CHECK: successors: %bb.1(0x80000000) + ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4 + ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8 + ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4 + ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8 + ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 + ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg + ; CHECK: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg + ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3 + ; CHECK: bb.1.do.body.i: + ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r12 + ; CHECK: renamable $r12, renamable $q1 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg :: (load 16 from %ir.pSrc.addr.0.i2, align 4) + ; CHECK: renamable $q0 = MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, killed renamable $q0 + ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1 + ; CHECK: bb.2.arm_mean_f32_mve.exit: + ; CHECK: successors: %bb.3(0x80000000) + ; CHECK: liveins: $q0, $r0, $r1, $r2 + ; CHECK: $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg + ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0 + ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg + ; CHECK: renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg + ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg + ; CHECK: renamable $r12 = VMOVRS killed renamable $s0, 14 /* CC::al */, $noreg + ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 + ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3 + ; CHECK: bb.3.do.body: + ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000) + ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r12 + ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg :: (load 16 from %ir.pSrc.addr.01, align 4) + ; CHECK: renamable $q1 = MVE_VSUB_qr_f32 killed 
renamable $q1, renamable $r12, 0, $noreg, undef renamable $q1 + ; CHECK: renamable $q0 = MVE_VFMAf32 killed renamable $q0, killed renamable $q1, killed renamable $q1, 0, killed $noreg + ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.3 + ; CHECK: bb.4.do.end: + ; CHECK: liveins: $q0, $r1, $r2 + ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg + ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0 + ; CHECK: $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg + ; CHECK: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg + ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg + ; CHECK: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.pResult) + ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r4, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r4, -8 + $r3 = tMOVr $r1, 14 /* CC::al */, $noreg + tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr + t2IT 10, 8, implicit-def $itstate + renamable $r3 = tMOVi8 $noreg, 4, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r3, implicit killed $itstate + renamable $r12 = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg + renamable $r3, dead $cpsr = tSUBrr renamable $r1, killed renamable $r3, 14 /* CC::al */, $noreg + renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 + renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 3, 14 /* CC::al */, $noreg + renamable $lr = nuw nsw t2ADDrs killed renamable $r12, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg + $r3 = tMOVr $r1, 14 /* CC::al */, $noreg + $r12 = tMOVr $r0, 14 /* CC::al */, $noreg + t2DoLoopStart renamable $lr + $r4 = tMOVr $lr, 14 /* CC::al */, $noreg + + bb.1.do.body.i: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r12 + + renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + renamable $lr = t2LoopDec killed renamable $lr, 1 + MVE_VPST 4, implicit $vpr + renamable $r12, renamable $q1 = MVE_VLDRWU32_post killed renamable $r12, 16, 1, renamable $vpr :: (load 16 from %ir.pSrc.addr.0.i2, align 4) + renamable $q0 = MVE_VADDf32 killed renamable $q0, killed renamable $q1, 1, killed renamable $vpr, renamable $q0 + t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2.arm_mean_f32_mve.exit: + successors: %bb.3(0x80000000) + liveins: $q0, $r0, $r1, $r2, $r4 + + $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg + $lr = tMOVr $r4, 14 /* CC::al */, $noreg + renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0 + $r3 = tMOVr $r1, 14 /* CC::al */, $noreg + renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg + t2DoLoopStart killed $r4 + renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg + renamable $r12 = VMOVRS killed renamable $s0, 14 
/* CC::al */, $noreg + renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 + + bb.3.do.body: + successors: %bb.3(0x7c000000), %bb.4(0x04000000) + liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r12 + + renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + renamable $lr = t2LoopDec killed renamable $lr, 1 + MVE_VPST 2, implicit $vpr + renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.pSrc.addr.01, align 4) + renamable $q1 = MVE_VSUB_qr_f32 killed renamable $q1, renamable $r12, 1, renamable $vpr, undef renamable $q1 + renamable $q0 = MVE_VFMAf32 killed renamable $q0, killed renamable $q1, renamable $q1, 1, killed renamable $vpr + t2LoopEnd renamable $lr, %bb.3, implicit-def dead $cpsr + tB %bb.4, 14 /* CC::al */, $noreg + + bb.4.do.end: + liveins: $q0, $r1, $r2 + + renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg + renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0 + $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg + renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg + renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg + VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.pResult) + frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc + +... Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir @@ -151,8 +151,8 @@ ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: $lr = t2DLS killed renamable $lr - ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $lr, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $lr ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir @@ -151,8 +151,8 @@ ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: $lr = t2DLS killed renamable $lr - ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $lr, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $lr ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir 
=================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir @@ -151,8 +151,8 @@ ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: $lr = t2DLS killed renamable $lr - ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $lr, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $lr ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir @@ -148,8 +148,8 @@ ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r5 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: dead $lr = t2DLS renamable $r5 - ; CHECK: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r5, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r5 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r12 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir @@ -150,8 +150,8 @@ ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r5 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: dead $lr = t2DLS renamable $r5 - ; CHECK: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r5, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r5 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r12 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir @@ -279,8 +279,8 @@ ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r3 - ; CHECK: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg + ; CHECK: $r1 = tMOVr $r3, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r3 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; 
CHECK: liveins: $q0, $r0, $r1, $r2 @@ -397,8 +397,8 @@ ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r3 - ; CHECK: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg + ; CHECK: $r1 = tMOVr $r3, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r3 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir @@ -143,21 +143,17 @@ ; CHECK: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: t2STRi12 renamable $lr, killed renamable $r3, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.iter.addr) - ; CHECK: $lr = t2DLS killed renamable $lr ; CHECK: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = MVE_DLSTP_32 killed renamable $r12 ; CHECK: bb.1.do.body: ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000) - ; CHECK: liveins: $r0, $r1, $r2, $r12 + ; CHECK: liveins: $r0, $r1, $r2 ; CHECK: $lr = tMOVr $r2, 14 /* CC::al */, $noreg - ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg ; CHECK: renamable $r2, dead $cpsr = nsw tSUBi8 killed $r2, 1, 14 /* CC::al */, $noreg - ; CHECK: renamable $r12 = nsw t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg - ; CHECK: MVE_VPST 8, implicit $vpr - ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.pSrc.addr.02, align 4) + ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg :: (load 16 from %ir.pSrc.addr.02, align 4) ; CHECK: renamable $q0 = MVE_VMULf32 killed renamable $q0, killed renamable $q0, 0, $noreg, undef renamable $q0 - ; CHECK: MVE_VPST 8, implicit $vpr - ; CHECK: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 1, killed renamable $vpr :: (store 16 into %ir.pDst.addr.01, align 4) - ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.1 + ; CHECK: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 0, killed $noreg :: (store 16 into %ir.pDst.addr.01, align 4) + ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.1 ; CHECK: bb.2.do.end: ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc bb.0.entry: @@ -247,22 +243,18 @@ ; CHECK: renamable $lr = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: $lr = t2DLS killed renamable $lr ; CHECK: t2STRi12 renamable $lr, killed renamable $r3, 0, 14 /* CC::al */, $noreg :: (store 4 into %ir.iter.addr) ; CHECK: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = MVE_DLSTP_32 killed renamable $r12 ; CHECK: bb.1.do.body: ; CHECK: successors: 
%bb.1(0x7c000000), %bb.2(0x04000000) - ; CHECK: liveins: $r0, $r1, $r2, $r12 + ; CHECK: liveins: $r0, $r1, $r2 ; CHECK: $lr = tMOVr $r2, 14 /* CC::al */, $noreg - ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg ; CHECK: renamable $r2, dead $cpsr = nsw tSUBi8 killed $r2, 1, 14 /* CC::al */, $noreg - ; CHECK: renamable $r12 = nsw t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg - ; CHECK: MVE_VPST 8, implicit $vpr - ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.pSrc.addr.02, align 4) + ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg :: (load 16 from %ir.pSrc.addr.02, align 4) ; CHECK: renamable $q0 = MVE_VMULf32 killed renamable $q0, killed renamable $q0, 0, $noreg, undef renamable $q0 - ; CHECK: MVE_VPST 8, implicit $vpr - ; CHECK: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 1, killed renamable $vpr :: (store 16 into %ir.pDst.addr.01, align 4) - ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.1 + ; CHECK: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 0, killed $noreg :: (store 16 into %ir.pDst.addr.01, align 4) + ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.1 ; CHECK: bb.2.do.end: ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc bb.0.entry: Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir @@ -46,7 +46,6 @@ ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4 ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8 ; CHECK: tCMPi8 renamable $r1, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr - ; CHECK: renamable $r12 = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: tBcc %bb.2, 2 /* CC::hs */, killed $cpsr ; CHECK: bb.1: ; CHECK: liveins: $r2 @@ -55,49 +54,39 @@ ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc ; CHECK: bb.2: ; CHECK: successors: %bb.3(0x80000000) - ; CHECK: liveins: $r0, $r1, $r2, $r12 - ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg - ; CHECK: tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr - ; CHECK: t2IT 11, 8, implicit-def $itstate - ; CHECK: $r12 = tMOVr renamable $r1, 11 /* CC::lt */, killed $cpsr, implicit killed renamable $r12, implicit killed $itstate - ; CHECK: renamable $r3 = t2SUBrr renamable $r1, killed renamable $r12, 14 /* CC::al */, $noreg, $noreg - ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 3, 14 /* CC::al */, $noreg + ; CHECK: liveins: $r0, $r1, $r2 ; CHECK: $r12 = tMOVr $r1, 14 /* CC::al */, $noreg - ; CHECK: renamable $r4 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r12 ; CHECK: bb.3: ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000) - ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4 + ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3 ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r3, 0, 0, $noreg ; CHECK: renamable $q0 = nnan ninf nsz MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, killed renamable $q0 ; CHECK: renamable $r3, dead $cpsr 
= nuw tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.3 ; CHECK: bb.4: ; CHECK: successors: %bb.5(0x80000000) - ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4 + ; CHECK: liveins: $q0, $r0, $r1, $r2 ; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg ; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14 /* CC::al */, $noreg ; CHECK: renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14 /* CC::al */, $noreg, implicit killed $q0 ; CHECK: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg ; CHECK: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg - ; CHECK: $lr = t2DLS killed $r4 ; CHECK: renamable $s4 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 + ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3 ; CHECK: bb.5: ; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000) - ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $s4 - ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg + ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $s4 ; CHECK: $r4 = VMOVRS $s4, 14 /* CC::al */, $noreg - ; CHECK: MVE_VPST 2, implicit $vpr - ; CHECK: renamable $q2 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 1, renamable $vpr - ; CHECK: renamable $q2 = nnan ninf nsz MVE_VSUB_qr_f32 killed renamable $q2, killed renamable $r4, 1, renamable $vpr, undef renamable $q2 - ; CHECK: renamable $q0 = nnan ninf nsz MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 1, killed renamable $vpr - ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg + ; CHECK: renamable $q2 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg + ; CHECK: renamable $q2 = nnan ninf nsz MVE_VSUB_qr_f32 killed renamable $q2, killed renamable $r4, 0, $noreg, undef renamable $q2 + ; CHECK: renamable $q0 = nnan ninf nsz MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 0, killed $noreg ; CHECK: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg - ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.5 + ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5 ; CHECK: bb.6: ; CHECK: liveins: $q0, $r1, $r2 ; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir @@ -152,47 +152,35 @@ ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8 ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4 ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8 - ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg - ; CHECK: tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr - ; CHECK: t2IT 10, 8, implicit-def $itstate - ; CHECK: renamable $r3 = tMOVi8 $noreg, 4, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r3, implicit killed $itstate - ; CHECK: renamable $r12 = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg - ; CHECK: renamable $r3, dead $cpsr = tSUBrr renamable $r1, killed renamable $r3, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable 
$q0 - ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 3, 14 /* CC::al */, $noreg - ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r12, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg ; CHECK: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg - ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3 ; CHECK: bb.1.do.body.i: ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000) - ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r4, $r12 + ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r12 ; CHECK: renamable $r12, renamable $q1 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg :: (load 16 from %ir.pSrc.addr.0.i2, align 4) ; CHECK: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, killed renamable $q0 ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1 ; CHECK: bb.2.arm_mean_f32_mve.exit: ; CHECK: successors: %bb.3(0x80000000) - ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4 + ; CHECK: liveins: $q0, $r0, $r1, $r2 ; CHECK: $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0 - ; CHECK: $lr = t2DLS killed $r4 ; CHECK: renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg ; CHECK: renamable $r3 = VMOVRS killed renamable $s0, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0 ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, undef renamable $q1 ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg + ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3 ; CHECK: bb.3.do.body: ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000) - ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3 - ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg - ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg - ; CHECK: MVE_VPST 2, implicit $vpr - ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.pSrc.addr.01, align 4) - ; CHECK: renamable $q2 = nnan ninf nsz arcp contract afn reassoc MVE_VSUBf32 killed renamable $q2, renamable $q1, 1, renamable $vpr, undef renamable $q2 - ; CHECK: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 1, killed renamable $vpr - ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.3 + ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1, $r2 + ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg :: (load 16 from %ir.pSrc.addr.01, align 4) + ; CHECK: renamable $q2 = nnan ninf nsz arcp contract afn reassoc MVE_VSUBf32 killed renamable $q2, renamable $q1, 0, $noreg, undef renamable $q2 + ; CHECK: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 0, killed $noreg + ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.3 ; CHECK: bb.4.do.end: ; CHECK: liveins: $q0, $r1, $r2 ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg Index: 
llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-operand.ll =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-operand.ll +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-operand.ll @@ -6,18 +6,9 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} -; CHECK-NEXT: mov r3, r1 -; CHECK-NEXT: cmp r1, #4 -; CHECK-NEXT: it ge -; CHECK-NEXT: movge r3, #4 -; CHECK-NEXT: mov.w r12, #1 -; CHECK-NEXT: subs r3, r1, r3 ; CHECK-NEXT: vmov.i32 q0, #0x0 -; CHECK-NEXT: adds r3, #3 -; CHECK-NEXT: add.w lr, r12, r3, lsr #2 ; CHECK-NEXT: mov r3, r1 ; CHECK-NEXT: mov r12, r0 -; CHECK-NEXT: mov r4, lr ; CHECK-NEXT: dlstp.32 lr, r3 ; CHECK-NEXT: .LBB0_1: @ %do.body.i ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 @@ -29,19 +20,16 @@ ; CHECK-NEXT: vadd.f32 s0, s3, s3 ; CHECK-NEXT: mov r3, r1 ; CHECK-NEXT: vcvt.f32.u32 s4, s4 -; CHECK-NEXT: dls lr, r4 ; CHECK-NEXT: vdiv.f32 s0, s0, s4 ; CHECK-NEXT: vmov r12, s0 ; CHECK-NEXT: vmov.i32 q0, #0x0 +; CHECK-NEXT: dlstp.32 lr, r3 ; CHECK-NEXT: .LBB0_3: @ %do.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vctp.32 r3 -; CHECK-NEXT: subs r3, #4 -; CHECK-NEXT: vpsttt -; CHECK-NEXT: vldrwt.u32 q1, [r0], #16 -; CHECK-NEXT: vsubt.f32 q1, q1, r12 -; CHECK-NEXT: vfmat.f32 q0, q1, q1 -; CHECK-NEXT: le lr, .LBB0_3 +; CHECK-NEXT: vldrw.u32 q1, [r0], #16 +; CHECK-NEXT: vsub.f32 q1, q1, r12 +; CHECK-NEXT: vfma.f32 q0, q1, q1 +; CHECK-NEXT: letp lr, .LBB0_3 ; CHECK-NEXT: @ %bb.4: @ %do.end ; CHECK-NEXT: subs r0, r1, #1 ; CHECK-NEXT: vadd.f32 s0, s3, s3 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir @@ -129,8 +129,8 @@ ; CHECK: successors: %bb.2(0x80000000) ; CHECK: liveins: $r0, $r1, $r2, $r3 ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = MVE_DLSTP_32 killed renamable $r3 ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = MVE_DLSTP_32 killed renamable $r3 ; CHECK: bb.2.loop.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $r0, $r1, $r2, $r12 @@ -229,8 +229,8 @@ ; CHECK: successors: %bb.2(0x80000000) ; CHECK: liveins: $r0, $r1, $r2, $r3 ; CHECK: renamable $r12 = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = MVE_DLSTP_16 killed renamable $r3 ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = MVE_DLSTP_16 killed renamable $r3 ; CHECK: bb.2.loop.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $r0, $r1, $r2, $r4 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir @@ -92,8 +92,8 @@ ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: $r12 = t2MOVi16 32768, 14 /* CC::al */, $noreg ; CHECK: $r12 = t2MOVTi16 killed $r12, 65535, 14 /* CC::al */, $noreg - ; CHECK: dead $lr = t2DLS renamable $r3 - ; CHECK: $r5 = tMOVr killed $r3, 14 /* CC::al */, $noreg + ; CHECK: $r5 = tMOVr $r3, 14 /* CC::al */, $noreg + ; 
CHECK: dead $lr = t2DLS killed renamable $r3 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $r0, $r1, $r2, $r5, $r12 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir @@ -127,8 +127,8 @@ ; CHECK: successors: %bb.2(0x80000000) ; CHECK: liveins: $r0, $r1, $r2, $r3 ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r4 - ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + ; CHECK: $r12 = tMOVr $r4, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r4 ; CHECK: bb.2.loop.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12 @@ -233,8 +233,8 @@ ; CHECK: successors: %bb.2(0x80000000) ; CHECK: liveins: $r0, $r1, $r2, $r3 ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load 4 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r4 - ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg + ; CHECK: $r12 = tMOVr $r4, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r4 ; CHECK: bb.2.loop.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir @@ -1266,8 +1266,8 @@ ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r3 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $r0, $r1, $r2, $r3 @@ -1499,8 +1499,8 @@ ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r3 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $r0, $r1, $r2, $r3 @@ -1737,8 +1737,8 @@ ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; 
CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4 @@ -1859,8 +1859,8 @@ ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load 8 from %fixed-stack.0) ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg - ; CHECK: dead $lr = t2DLS renamable $r2 - ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r2, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r2 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4 @@ -1991,8 +1991,8 @@ ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4 @@ -2112,8 +2112,8 @@ ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load 8 from %fixed-stack.0) ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg - ; CHECK: dead $lr = t2DLS renamable $r2 - ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r2, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r2 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4 @@ -2244,8 +2244,8 @@ ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4 @@ -2365,8 +2365,8 @@ ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load 8 from %fixed-stack.0) ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 27, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg - ; CHECK: dead $lr = t2DLS renamable $r2 - ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r2, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r2 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4 @@ -2497,8 +2497,8 @@ ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable 
$r3, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load 16 from %fixed-stack.0, align 8) - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4 @@ -2618,8 +2618,8 @@ ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load 8 from %fixed-stack.0) ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 27, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg - ; CHECK: dead $lr = t2DLS renamable $r2 - ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg + ; CHECK: $r4 = tMOVr $r2, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r2 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir @@ -128,8 +128,8 @@ ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r3 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q1, $r0, $r1, $r2, $r3 Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir =================================================================== --- llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir @@ -125,8 +125,8 @@ ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg - ; CHECK: dead $lr = t2DLS renamable $r12 - ; CHECK: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg + ; CHECK: $r3 = tMOVr $r12, 14 /* CC::al */, $noreg + ; CHECK: dead $lr = t2DLS killed renamable $r12 ; CHECK: bb.2.vector.body: ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK: liveins: $q1, $r0, $r1, $r2, $r3