Index: llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
===================================================================
--- llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
+++ llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
@@ -34,30 +34,30 @@
 #define DEBUG_TYPE "arm-mve-vpt"
 
 namespace {
-  class MVEVPTBlock : public MachineFunctionPass {
-  public:
-    static char ID;
-    const Thumb2InstrInfo *TII;
-    const TargetRegisterInfo *TRI;
+class MVEVPTBlock : public MachineFunctionPass {
+public:
+  static char ID;
+  const Thumb2InstrInfo *TII;
+  const TargetRegisterInfo *TRI;
 
-    MVEVPTBlock() : MachineFunctionPass(ID) {}
+  MVEVPTBlock() : MachineFunctionPass(ID) {}
 
-    bool runOnMachineFunction(MachineFunction &Fn) override;
+  bool runOnMachineFunction(MachineFunction &Fn) override;
 
-    MachineFunctionProperties getRequiredProperties() const override {
-      return MachineFunctionProperties().set(
-          MachineFunctionProperties::Property::NoVRegs);
-    }
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
 
-    StringRef getPassName() const override {
-      return "MVE VPT block insertion pass";
-    }
+  StringRef getPassName() const override {
+    return "MVE VPT block insertion pass";
+  }
 
-  private:
-    bool InsertVPTBlocks(MachineBasicBlock &MBB);
-  };
+private:
+  bool InsertVPTBlocks(MachineBasicBlock &MBB);
+};
 
-  char MVEVPTBlock::ID = 0;
+char MVEVPTBlock::ID = 0;
 
 } // end anonymous namespace
 
@@ -94,15 +94,92 @@
   return &*CmpMI;
 }
 
+// Adds "elses" to a T, TT or TTT blockmask.
+static uint64_t AddElsesToARMVPTBlockMask(uint64_t BlockMask,
+                                          unsigned NumElses) {
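+  // Table[i][j] gives the mask for an incoming block of (i+1) "then"
+  // predicates extended with (j+1) "else" predicates; zero entries mark
+  // combinations that would exceed the 4-instruction VPT block limit.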
+  static const uint64_t Table[3][3] = {
+      /*T  */ {ARMVCC::TE, ARMVCC::TEE, ARMVCC::TEEE},
+      /*TT */ {ARMVCC::TTE, ARMVCC::TTEE, 0},
+      /*TTT*/ {ARMVCC::TTTE, 0, 0},
+  };
+  assert(NumElses != 0);
+  switch (BlockMask) {
+  case ARMVCC::T:
+    assert(NumElses <= 3 && "Cannot add more than 3 elses to a T BlockMask - "
+                            "Max BlockMask size is 4");
+    return Table[0][NumElses - 1];
+  case ARMVCC::TT:
+    assert(NumElses <= 2 && "Cannot add more than 2 elses to a TT BlockMask - "
+                            "Max BlockMask size is 4");
+    return Table[1][NumElses - 1];
+  case ARMVCC::TTT:
+    assert(NumElses == 1 && "Cannot add more than 1 else to a TTT BlockMask - "
+                            "Max BlockMask size is 4");
+    return Table[2][NumElses - 1];
+  default:
+    llvm_unreachable("Unsupported BlockMask");
+  }
+}
+
+// Skips a block of predicated instructions.
+// Returns true if the whole block of predicated instructions was skipped.
+// Returns false when it stopped early (due to MaxSkips), or if MBIter didn't
+// point to a predicated instruction.
+static bool SkipPredicatedInstrs(MachineBasicBlock &Block,
+                                 MachineBasicBlock::instr_iterator &MBIter,
+                                 ARMVCC::VPTCodes Pred, unsigned MaxSkips,
+                                 unsigned &NumInstSkipped) {
+  MachineBasicBlock::instr_iterator EndIter = Block.instr_end();
+  ARMVCC::VPTCodes NextPred = ARMVCC::None;
+  unsigned PredReg;
+  NumInstSkipped = 0;
+
+  while (MBIter != EndIter) {
+    NextPred = getVPTInstrPredicate(*MBIter, PredReg);
+    assert(NextPred != ARMVCC::Else &&
+           "VPT block pass does not expect Else preds");
+    if ((NextPred != Pred) || (MaxSkips == 0))
+      break;
+    --MaxSkips;
+    ++MBIter;
+    ++NumInstSkipped;
+  }
+
+  return NumInstSkipped != 0 && (NextPred == ARMVCC::None || MBIter == EndIter);
+}
+
+// Returns true if we can safely optimize the block following a VPNOT.
+//
+// The VPNOT can be safely removed when at least one of the instructions in
+// its block defines VPR, or uses and kills it.
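+// (For instance, in mve-vpt-nots.mir below, the trailing predicated VCMP
+// redefines $vpr and kills its incoming value, which is what makes removing
+// the preceding VPNOT safe.)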
+static bool CanOptimizeVPNOTBlock(MachineBasicBlock::iterator Iter,
+                                  MachineBasicBlock::iterator End) {
+  for (; Iter != End; ++Iter)
+    if (Iter->definesRegister(ARM::VPR) ||
+        Iter->findRegisterUseOperandIdx(ARM::VPR, true) != -1)
+      return true;
+  return false;
+}
+
 bool MVEVPTBlock::InsertVPTBlocks(MachineBasicBlock &Block) {
   bool Modified = false;
   MachineBasicBlock::instr_iterator MBIter = Block.instr_begin();
   MachineBasicBlock::instr_iterator EndIter = Block.instr_end();
 
+  SmallVector<MachineInstr *, 4> DeadInstructions;
+
   while (MBIter != EndIter) {
     MachineInstr *MI = &*MBIter;
     unsigned PredReg = 0;
-    DebugLoc dl = MI->getDebugLoc();
+    DebugLoc DL = MI->getDebugLoc();
 
     ARMVCC::VPTCodes Pred = getVPTInstrPredicate(*MI, PredReg);
 
@@ -114,46 +183,84 @@
     // hope we'll never generate an Else as input to this pass.
     assert(Pred != ARMVCC::Else && "VPT block pass does not expect Else preds");
 
-    if (Pred == ARMVCC::None) {
-      ++MBIter;
+    ++MBIter;
+    if (Pred == ARMVCC::None)
       continue;
-    }
-
-    LLVM_DEBUG(dbgs() << "VPT block created for: "; MI->dump());
-    int VPTInstCnt = 1;
-    ARMVCC::VPTCodes NextPred;
 
     // Look at subsequent instructions, checking if they can be in the same VPT
     // block.
-    ++MBIter;
-    while (MBIter != EndIter && VPTInstCnt < 4) {
-      NextPred = getVPTInstrPredicate(*MBIter, PredReg);
-      assert(NextPred != ARMVCC::Else &&
-             "VPT block pass does not expect Else preds");
-      if (NextPred != Pred)
-        break;
-      LLVM_DEBUG(dbgs() << "  adding : "; MBIter->dump());
-      ++VPTInstCnt;
-      ++MBIter;
-    };
-
-    unsigned BlockMask = getARMVPTBlockMask(VPTInstCnt);
+    LLVM_DEBUG(dbgs() << "VPT block created for: "; MI->dump());
+
+    // The number of instructions in the "then" part of the VPT block.
+    int VPTThenInstCnt = 1;
+
+    unsigned SkippedInstrs;
+    SkipPredicatedInstrs(Block, MBIter, Pred, 3, SkippedInstrs);
+
+    VPTThenInstCnt += SkippedInstrs;
+    assert(VPTThenInstCnt <= 4);
+
+    LLVM_DEBUG(
+        for (MachineBasicBlock::instr_iterator Iter = ++MI->getIterator(),
+             End = MBIter;
+             Iter != End; ++Iter) {
+          dbgs() << "  adding: ";
+          Iter->dump();
+        });
+
+    // Generate the initial BlockMask.
+    unsigned BlockMask = getARMVPTBlockMask(VPTThenInstCnt);
+
+    // If the next instr is a VPNOT, we can also add an "else" block, provided
+    // there's room for it in the VPT block above (at least one instr
+    // available) and we can safely remove the VPNOT.
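+    // For example (see mve-pred-not.ll below), a "vpt / vcmpt / vpnot / vpst /
+    // vcmpt" sequence collapses into a single "vpte / vcmpt / vcmpe" block.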
+    if (MBIter != EndIter && VPTThenInstCnt < 4 &&
+        MBIter->getOpcode() == ARM::MVE_VPNOT) {
+      unsigned VPTElseInstCnt = 0;
+      MachineBasicBlock::instr_iterator AfterVPNotIter = MBIter;
+      ++AfterVPNotIter;
+      // Skip through the predicated instructions of the VPNOT, stopping after
+      // at most (4 - VPTThenInstCnt) of them. If we manage to skip a whole
+      // block, continue.
+      if (SkipPredicatedInstrs(Block, AfterVPNotIter, ARMVCC::Then,
+                               (4 - VPTThenInstCnt), VPTElseInstCnt)) {
+        // Check if we can optimize the VPNOT block.
+        if (CanOptimizeVPNOTBlock(MBIter, AfterVPNotIter)) {
+          LLVM_DEBUG(dbgs() << "  removing VPNOT: "; MBIter->dump(););
+
+          // Record the VPNOT to remove it later.
+          DeadInstructions.push_back(&*MBIter);
+          ++MBIter;
+
+          // Update the BlockMask to include the elses.
+          BlockMask = AddElsesToARMVPTBlockMask(BlockMask, VPTElseInstCnt);
+
+          // Replace all Thens with Elses in the predicated instructions block
+          // and move MBIter past the end of the block.
+          for (; MBIter != AfterVPNotIter; ++MBIter) {
+            int OpIdx = findFirstVPTPredOperandIdx(*MBIter);
+            assert(OpIdx != -1);
+            MBIter->getOperand(OpIdx).setImm(ARMVCC::Else);
+
+            LLVM_DEBUG(dbgs() << "    adding else: "; MBIter->dump(););
+          }
+        }
+      }
+    }
 
     // Search back for a VCMP that can be folded to create a VPT, or else create
     // a VPST directly
     MachineInstrBuilder MIBuilder;
     unsigned NewOpcode;
-    MachineInstr *VCMP = findVCMPToFoldIntoVPST(MI, TRI, NewOpcode);
-    if (VCMP) {
+    if (MachineInstr *VCMP = findVCMPToFoldIntoVPST(MI, TRI, NewOpcode)) {
       LLVM_DEBUG(dbgs() << "  folding VCMP into VPST: "; VCMP->dump());
-      MIBuilder = BuildMI(Block, MI, dl, TII->get(NewOpcode));
+      MIBuilder = BuildMI(Block, MI, DL, TII->get(NewOpcode));
       MIBuilder.addImm(BlockMask);
       MIBuilder.add(VCMP->getOperand(1));
       MIBuilder.add(VCMP->getOperand(2));
       MIBuilder.add(VCMP->getOperand(3));
       VCMP->eraseFromParent();
     } else {
-      MIBuilder = BuildMI(Block, MI, dl, TII->get(ARM::MVE_VPST));
+      MIBuilder = BuildMI(Block, MI, DL, TII->get(ARM::MVE_VPST));
       MIBuilder.addImm(BlockMask);
     }
 
@@ -162,6 +280,15 @@
 
     Modified = true;
   }
+
+  // Erase all dead instructions.
+  for (MachineInstr *DeadMI : DeadInstructions) {
+    if (DeadMI->isInsideBundle())
+      DeadMI->eraseFromBundle();
+    else
+      DeadMI->eraseFromParent();
+  }
+
   return Modified;
 }
 
Index: llvm/test/CodeGen/Thumb2/mve-pred-not.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-pred-not.ll
+++ llvm/test/CodeGen/Thumb2/mve-pred-not.ll
@@ -383,11 +383,9 @@
 define arm_aapcs_vfpcc <4 x i32> @vpnot_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
 ; CHECK-LABEL: vpnot_v4i1:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vpt.s32 lt, q0, zr
+; CHECK-NEXT:    vpte.s32 lt, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 gt, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vcmpt.i32 eq, q2, zr
+; CHECK-NEXT:    vcmpe.i32 eq, q2, zr
 ; CHECK-NEXT:    vpsel q0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
Index: llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
+++ llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
@@ -19,11 +19,9 @@
 ; CHECK-NEXT:  .LBB0_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vpt.s32 ge, q1, r2
+; CHECK-NEXT:    vpte.s32 ge, q1, r2
 ; CHECK-NEXT:    vcmpt.s32 le, q1, r1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q0, [r0], #16
+; CHECK-NEXT:    vstrwe.32 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB0_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -77,11 +75,9 @@
 ; CHECK-NEXT:  .LBB1_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
-; CHECK-NEXT:    vpt.s16 ge, q1, r2
+; CHECK-NEXT:    vpte.s16 ge, q1, r2
 ; CHECK-NEXT:    vcmpt.s16 le, q1, r1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrht.16 q0, [r0], #16
+; CHECK-NEXT:    vstrhe.16 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB1_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -135,11 +131,9 @@
 ; CHECK-NEXT:  .LBB2_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q1, [r0]
-; CHECK-NEXT:    vpt.s8 ge, q1, r2
+; CHECK-NEXT:    vpte.s8 ge, q1, r2
 ; CHECK-NEXT:    vcmpt.s8 le, q1, r1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrbt.8 q0, [r0], #16
+; CHECK-NEXT:    vstrbe.8 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB2_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -197,11 +191,9 @@
 ; CHECK-NEXT:  .LBB3_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vpt.f32 le, q0, q3
+; CHECK-NEXT:    vpte.f32 le, q0, q3
 ; CHECK-NEXT:    vcmpt.f32 le, q3, q1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q2, [r0], #16
+; CHECK-NEXT:    vstrwe.32 q2, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB3_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -259,11 +251,9 @@
 ; CHECK-NEXT:  .LBB4_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q3, [r0]
-; CHECK-NEXT:    vpt.f16 le, q1, q3
+; CHECK-NEXT:    vpte.f16 le, q1, q3
 ; CHECK-NEXT:    vcmpt.f16 le, q3, q0
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrht.16 q2, [r0], #16
+; CHECK-NEXT:    vstrhe.16 q2, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB4_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -322,11 +312,9 @@
 ; CHECK-NEXT:  .LBB5_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
-; CHECK-NEXT:    vpt.s32 ge, q1, r2
+; CHECK-NEXT:    vpte.s32 ge, q1, r2
 ; CHECK-NEXT:    vcmpt.s32 le, q1, r1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q0, [r0], #16
+; CHECK-NEXT:    vstrwe.32 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB5_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -380,11 +368,9 @@
 ; CHECK-NEXT:  .LBB6_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
-; CHECK-NEXT:    vpt.s16 ge, q1, r2
+; CHECK-NEXT:    vpte.s16 ge, q1, r2
 ; CHECK-NEXT:    vcmpt.s16 le, q1, r1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrht.16 q0, [r0], #16
+; CHECK-NEXT:    vstrhe.16 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB6_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -438,11 +424,9 @@
 ; CHECK-NEXT:  .LBB7_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrb.u8 q1, [r0]
-; CHECK-NEXT:    vpt.s8 ge, q1, r2
+; CHECK-NEXT:    vpte.s8 ge, q1, r2
 ; CHECK-NEXT:    vcmpt.s8 le, q1, r1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrbt.8 q0, [r0], #16
+; CHECK-NEXT:    vstrbe.8 q0, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB7_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -500,11 +484,9 @@
 ; CHECK-NEXT:  .LBB8_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vpt.f32 le, q0, q3
+; CHECK-NEXT:    vpte.f32 le, q0, q3
 ; CHECK-NEXT:    vcmpt.f32 le, q3, q1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q2, [r0], #16
+; CHECK-NEXT:    vstrwe.32 q2, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB8_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
@@ -562,11 +544,9 @@
 ; CHECK-NEXT:  .LBB9_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrh.u16 q3, [r0]
-; CHECK-NEXT:    vpt.f16 le, q1, q3
+; CHECK-NEXT:    vpte.f16 le, q1, q3
 ; CHECK-NEXT:    vcmpt.f16 le, q3, q0
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrht.16 q2, [r0], #16
+; CHECK-NEXT:    vstrhe.16 q2, [r0], #16
 ; CHECK-NEXT:    le lr, .LBB9_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
 ; CHECK-NEXT:    pop {r7, pc}
Index: llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir
+++ llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir
@@ -68,14 +68,10 @@
     ; CHECK: liveins: $q0, $q1, $q2, $r0
     ; CHECK: $vpr = VMSR_P0 killed $r0, 14 /* CC::al */, $noreg
     ; CHECK: $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
-    ; CHECK: BUNDLE implicit-def $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit $vpr, implicit killed $q1, implicit $q2, implicit killed $q3 {
-    ; CHECK:   MVE_VPST 8, implicit $vpr
+    ; CHECK: BUNDLE implicit-def dead $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $vpr, implicit-def $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit killed $vpr, implicit killed $q1, implicit $q2, implicit killed $q3 {
+    ; CHECK:   MVE_VPST 12, implicit $vpr
     ; CHECK:   renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
-    ; CHECK: }
-    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
-    ; CHECK: BUNDLE implicit-def $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit $vpr, implicit killed $q3, implicit undef $q1 {
-    ; CHECK:   MVE_VPST 8, implicit $vpr
-    ; CHECK:   renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, renamable $q3, 1, renamable $vpr, undef renamable $q1
+    ; CHECK:   renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, internal renamable $q3, 2, internal renamable $vpr, undef renamable $q1
     ; CHECK: }
     ; CHECK: $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
     ; CHECK: BUNDLE implicit-def dead $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $vpr, implicit killed $q1, implicit killed $q2, implicit killed $q3, implicit killed $q0 {
Index: llvm/test/CodeGen/Thumb2/mve-vpt-nots.mir
===================================================================
--- llvm/test/CodeGen/Thumb2/mve-vpt-nots.mir
+++ llvm/test/CodeGen/Thumb2/mve-vpt-nots.mir
@@ -61,14 +61,10 @@
 
     ; CHECK-LABEL: name: vpnot
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: BUNDLE implicit-def $vpr, implicit $q0, implicit $zr, implicit $q1 {
-    ; CHECK:   MVE_VPTv4s32r 8, renamable $q0, $zr, 11, implicit-def $vpr
+    ; CHECK: BUNDLE implicit-def $vpr, implicit $q0, implicit $zr, implicit $q1, implicit killed $q2 {
+    ; CHECK:   MVE_VPTv4s32r 12, renamable $q0, $zr, 11, implicit-def $vpr
     ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, internal killed renamable $vpr
-    ; CHECK: }
-    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
-    ; CHECK: BUNDLE implicit-def $vpr, implicit killed $vpr, implicit killed $q2, implicit $zr {
-    ; CHECK:   MVE_VPST 8, implicit $vpr
-    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 2, internal killed renamable $vpr
     ; CHECK: }
     ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
     ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit $q0
@@ -244,14 +240,10 @@
     ; CHECK: liveins: $q0, $q1, $q2
     ; CHECK: renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
     ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
-    ; CHECK: BUNDLE implicit-def $vpr, implicit killed $vpr, implicit $q1, implicit $zr {
-    ; CHECK:   MVE_VPST 8, implicit $vpr
+    ; CHECK: BUNDLE implicit-def $vpr, implicit killed $vpr, implicit $q1, implicit $zr, implicit killed $q2 {
+    ; CHECK:   MVE_VPST 12, implicit $vpr
     ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
-    ; CHECK: }
-    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
-    ; CHECK: BUNDLE implicit-def $vpr, implicit killed $vpr, implicit killed $q2, implicit $zr {
-    ; CHECK:   MVE_VPST 8, implicit $vpr
-    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 2, internal killed renamable $vpr
     ; CHECK: }
     ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
     ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr