diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h @@ -582,6 +582,13 @@ unsigned FrameReg, int &Offset, const ARMBaseInstrInfo &TII); +void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB); +void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned DestReg); + +void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond); +void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, + unsigned Inactive); + } // end namespace llvm #endif // LLVM_LIB_TARGET_ARM_ARMBASEINSTRINFO_H diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -805,6 +805,32 @@ .addReg(ARM::CPSR, RegState::Implicit | RegState::Define); } +void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) +{ + MIB.addImm(ARMVCC::None); + MIB.addReg(0); +} + +void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, + unsigned DestReg) +{ + addUnpredicatedMveVpredNOp(MIB); + MIB.addReg(DestReg, RegState::Undef); +} + +void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) +{ + MIB.addImm(Cond); + MIB.addReg(ARM::VPR, RegState::Implicit); +} + +void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB, + unsigned Cond, unsigned Inactive) +{ + addPredicatedMveVpredNOp(MIB, Cond); + MIB.addReg(Inactive); +} + void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DestReg, @@ -833,14 +859,17 @@ else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64()) Opc = ARM::VMOVD; else if (ARM::QPRRegClass.contains(DestReg, SrcReg)) - Opc = ARM::VORRq; + Opc = Subtarget.hasNEON() ? 
ARM::VORRq : ARM::MVE_VORR; if (Opc) { MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg); MIB.addReg(SrcReg, getKillRegState(KillSrc)); - if (Opc == ARM::VORRq) + if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) MIB.addReg(SrcReg, getKillRegState(KillSrc)); - MIB.add(predOps(ARMCC::AL)); + if (Opc == ARM::MVE_VORR) + addUnpredicatedMveVpredROp(MIB, DestReg); + else + MIB.add(predOps(ARMCC::AL)); return; } @@ -901,6 +930,26 @@ } else if (DestReg == ARM::CPSR) { copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget); return; + } else if (DestReg == ARM::VPR) { + BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .add(predOps(ARMCC::AL)); + return; + } else if (SrcReg == ARM::VPR) { + BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .add(predOps(ARMCC::AL)); + return; + } else if (DestReg == ARM::FPSCR_NZCV) { + BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .add(predOps(ARMCC::AL)); + return; + } else if (SrcReg == ARM::FPSCR_NZCV) { + BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .add(predOps(ARMCC::AL)); + return; } assert(Opc && "Impossible reg-to-reg copy"); @@ -1010,6 +1059,13 @@ .addImm(0) .addMemOperand(MMO) .add(predOps(ARMCC::AL)); + } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) { + BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off)) + .addReg(SrcReg, getKillRegState(isKill)) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else llvm_unreachable("Unknown reg class!"); break; @@ -1042,7 +1098,7 @@ llvm_unreachable("Unknown reg class!"); break; case 16: - if (ARM::DPairRegClass.hasSubClassEq(RC)) { + if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) { // Use aligned spills if the stack can be realigned. 
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64)) @@ -1058,6 +1114,14 @@ .addMemOperand(MMO) .add(predOps(ARMCC::AL)); } + } else if (ARM::QPRRegClass.hasSubClassEq(RC) && + Subtarget.hasMVEIntegerOps()) { + auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32_off)); + MIB.addReg(SrcReg, getKillRegState(isKill)) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + addUnpredicatedMveVpredNOp(MIB); } else llvm_unreachable("Unknown reg class!"); break; @@ -1155,6 +1219,13 @@ return MI.getOperand(0).getReg(); } break; + case ARM::VSTR_P0_off: + if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() && + MI.getOperand(1).getImm() == 0) { + FrameIndex = MI.getOperand(0).getIndex(); + return ARM::P0; + } + break; case ARM::VST1q64: case ARM::VST1d64TPseudo: case ARM::VST1d64QPseudo: @@ -1225,6 +1296,12 @@ .addImm(0) .addMemOperand(MMO) .add(predOps(ARMCC::AL)); + } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) { + BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO) + .add(predOps(ARMCC::AL)); } else llvm_unreachable("Unknown reg class!"); break; @@ -1261,7 +1338,7 @@ llvm_unreachable("Unknown reg class!"); break; case 16: - if (ARM::DPairRegClass.hasSubClassEq(RC)) { + if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) { if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg) .addFrameIndex(FI) @@ -1274,6 +1351,13 @@ .addMemOperand(MMO) .add(predOps(ARMCC::AL)); } + } else if (ARM::QPRRegClass.hasSubClassEq(RC) && + Subtarget.hasMVEIntegerOps()) { + auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32_off), DestReg); + MIB.addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + addUnpredicatedMveVpredNOp(MIB); } else llvm_unreachable("Unknown reg class!"); break; @@ -1370,6 +1454,13 @@ return MI.getOperand(0).getReg(); } break; + case ARM::VLDR_P0_off: + if (MI.getOperand(0).isFI() 
&& MI.getOperand(1).isImm() &&
+        MI.getOperand(1).getImm() == 0) {
+      FrameIndex = MI.getOperand(0).getIndex();
+      return ARM::P0;
+    }
+    break;
   case ARM::VLD1q64:
   case ARM::VLD1d8TPseudo:
   case ARM::VLD1d16TPseudo:
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -146,6 +146,9 @@
                             SDValue &OffImm);
   bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
+  template <int Shift>
+  bool SelectT2AddrModeImm7(SDValue N, SDValue &Base,
+                            SDValue &OffImm);
   bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                              SDValue &OffReg, SDValue &ShImm);
   bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
@@ -1268,6 +1271,37 @@
   return false;
 }
 
+template <int Shift>
+bool ARMDAGToDAGISel::SelectT2AddrModeImm7(SDValue N,
+                                           SDValue &Base, SDValue &OffImm) {
+  if (N.getOpcode() == ISD::SUB ||
+      CurDAG->isBaseWithConstantOffset(N)) {
+    if (auto RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+      int RHSC = (int)RHS->getZExtValue();
+      if (N.getOpcode() == ISD::SUB)
+        RHSC = -RHSC;
+
+      if (RHSC % (1 << Shift) == 0 &&
+          RHSC >= -(0x7F << Shift) &&
+          RHSC <= +(0x7F << Shift)) {
+        Base = N.getOperand(0);
+        if (Base.getOpcode() == ISD::FrameIndex) {
+          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
+          Base = CurDAG->getTargetFrameIndex(
+              FI, TLI->getPointerTy(CurDAG->getDataLayout()));
+        }
+        OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
+        return true;
+      }
+    }
+  }
+
+  // Base only.
+ Base = N; + OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32); + return true; +} + bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N, SDValue &Base, SDValue &OffReg, SDValue &ShImm) { diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -813,6 +813,8 @@ MachineBasicBlock *MBB) const; MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI, MachineBasicBlock *MBB) const; + void addMVEITypes(); + void addMVEFPTypes(); }; enum NEONModImmType { diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -221,6 +221,30 @@ addTypeForNEON(VT, MVT::v2f64, MVT::v4i32); } +void ARMTargetLowering::addMVEITypes() { + const MVT iTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }; + + for (auto VT : iTypes) { + addRegisterClass(VT, &ARM::QPRRegClass); + for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) + setOperationAction(Opc, VT, Expand); + setOperationAction(ISD::LOAD, VT, Legal); + setOperationAction(ISD::STORE, VT, Legal); + } +} + +void ARMTargetLowering::addMVEFPTypes() { + const MVT fTypes[] = { MVT::v4f32, MVT::v8f16 }; + + for (MVT VT : fTypes) { + addRegisterClass(VT, &ARM::QPRRegClass); + for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) + setOperationAction(Opc, VT, Expand); + setOperationAction(ISD::LOAD, VT, Legal); + setOperationAction(ISD::STORE, VT, Legal); + } +} + ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI) : TargetLowering(TM), Subtarget(&STI) { @@ -548,6 +572,12 @@ setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom); setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom); + if (Subtarget->hasMVEIntegerOps()) + addMVEITypes(); + + if (Subtarget->hasMVEFloatOps()) + addMVEFPTypes(); + if (Subtarget->hasNEON()) { 
addDRTypeForNEON(MVT::v2f32); addDRTypeForNEON(MVT::v8i8); diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td --- a/llvm/lib/Target/ARM/ARMInstrMVE.td +++ b/llvm/lib/Target/ARM/ARMInstrMVE.td @@ -4066,3 +4066,63 @@ let Constraints = ""; } + +class MVE_unpred_vector_store_typed + : Pat<(StoreKind (Ty MQPR:$val), t2addrmode_imm7:$addr), + (RegImmInst (Ty MQPR:$val), t2addrmode_imm7:$addr)>; + +multiclass MVE_unpred_vector_store { + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; +} + +class MVE_unpred_vector_load_typed + : Pat<(Ty (LoadKind t2addrmode_imm7:$addr)), + (Ty (RegImmInst t2addrmode_imm7:$addr))>; +multiclass MVE_unpred_vector_load { + def : MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; +} + +let Predicates = [HasMVEInt, IsLE] in { + defm : MVE_unpred_vector_store; + defm : MVE_unpred_vector_store; + defm : MVE_unpred_vector_store; + + defm : MVE_unpred_vector_load; + defm : MVE_unpred_vector_load; + defm : MVE_unpred_vector_load; + + def : Pat<(v16i1 (load t2addrmode_imm7<2>:$addr)), + (v16i1 (VLDR_P0_off t2addrmode_imm7<2>:$addr))>; + def : Pat<(v8i1 (load t2addrmode_imm7<2>:$addr)), + (v8i1 (VLDR_P0_off t2addrmode_imm7<2>:$addr))>; + def : Pat<(v4i1 (load t2addrmode_imm7<2>:$addr)), + (v4i1 (VLDR_P0_off t2addrmode_imm7<2>:$addr))>; +} + +let Predicates = [HasMVEInt, IsBE] in { + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + def : MVE_unpred_vector_store_typed; + + def : MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; + def : 
MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; + def : MVE_unpred_vector_load_typed; +} diff --git a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp --- a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp +++ b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp @@ -203,85 +203,102 @@ MachineInstr *MI = &*MBBI; DebugLoc dl = MI->getDebugLoc(); unsigned PredReg = 0; - ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg); - if (CC == ARMCC::AL) { - ++MBBI; - continue; - } + ARMCC::CondCodes CC; + + if ((CC = getITInstrPredicate(*MI, PredReg)) != ARMCC::AL) { + Defs.clear(); + Uses.clear(); + TrackDefUses(MI, Defs, Uses, TRI); - Defs.clear(); - Uses.clear(); - TrackDefUses(MI, Defs, Uses, TRI); - - // Insert an IT instruction. - MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT)) - .addImm(CC); - - // Add implicit use of ITSTATE to IT block instructions. - MI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*ifDef*/, - true/*isImp*/, false/*isKill*/)); - - MachineInstr *LastITMI = MI; - MachineBasicBlock::iterator InsertPos = MIB.getInstr(); - ++MBBI; - - // Form IT block. - ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC); - unsigned Mask = 0, Pos = 3; - - // v8 IT blocks are limited to one conditional op unless -arm-no-restrict-it - // is set: skip the loop - if (!restrictIT) { - // Branches, including tricky ones like LDM_RET, need to end an IT - // block so check the instruction we just put in the block. - for (; MBBI != E && Pos && - (!MI->isBranch() && !MI->isReturn()) ; ++MBBI) { - if (MBBI->isDebugInstr()) - continue; - - MachineInstr *NMI = &*MBBI; - MI = NMI; - - unsigned NPredReg = 0; - ARMCC::CondCodes NCC = getITInstrPredicate(*NMI, NPredReg); - if (NCC == CC || NCC == OCC) { - Mask |= (NCC & 1) << Pos; - // Add implicit use of ITSTATE. 
- NMI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*ifDef*/, - true/*isImp*/, false/*isKill*/)); - LastITMI = NMI; - } else { - if (NCC == ARMCC::AL && - MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) { - --MBBI; - MBB.remove(NMI); - MBB.insert(InsertPos, NMI); - ClearKillFlags(MI, Uses); - ++NumMovedInsts; + // Insert an IT instruction. + MachineInstrBuilder MIB = + BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT)).addImm(CC); + + // Add implicit use of ITSTATE to IT block instructions. + MI->addOperand(MachineOperand::CreateReg( + ARM::ITSTATE, false /*ifDef*/, true /*isImp*/, false /*isKill*/)); + + MachineInstr *LastITMI = MI; + MachineBasicBlock::iterator InsertPos = MIB.getInstr(); + ++MBBI; + // Form IT block. + ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC); + unsigned Mask = 0, Pos = 3; + + // v8 IT blocks are limited to one conditional op unless + // -arm-no-restrict-it + // is set: skip the loop + if (!restrictIT) { + // Branches, including tricky ones like LDM_RET, need to end an IT + // block so check the instruction we just put in the block. + for (; MBBI != E && Pos && (!MI->isBranch() && !MI->isReturn()); + ++MBBI) { + if (MBBI->isDebugInstr()) continue; + + MachineInstr *NMI = &*MBBI; + MI = NMI; + + unsigned NPredReg = 0; + ARMCC::CondCodes NCC = getITInstrPredicate(*NMI, NPredReg); + if (NCC == CC || NCC == OCC) { + Mask |= (NCC & 1) << Pos; + // Add implicit use of ITSTATE. + NMI->addOperand( + MachineOperand::CreateReg(ARM::ITSTATE, false /*ifDef*/, + true /*isImp*/, false /*isKill*/)); + LastITMI = NMI; + } else { + if (NCC == ARMCC::AL && + MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) { + --MBBI; + MBB.remove(NMI); + MBB.insert(InsertPos, NMI); + ClearKillFlags(MI, Uses); + ++NumMovedInsts; + continue; + } + break; } - break; + TrackDefUses(NMI, Defs, Uses, TRI); + --Pos; } - TrackDefUses(NMI, Defs, Uses, TRI); - --Pos; } - } - // Finalize IT mask. - Mask |= (1 << Pos); - // Tag along (firstcond[0] << 4) with the mask. 
- Mask |= (CC & 1) << 4; - MIB.addImm(Mask); + // Finalize IT mask. + Mask |= (1 << Pos); + // Tag along (firstcond[0] << 4) with the mask. + Mask |= (CC & 1) << 4; + MIB.addImm(Mask); + + // Last instruction in IT block kills ITSTATE. + LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill(); + + // Finalize the bundle. + finalizeBundle(MBB, InsertPos.getInstrIterator(), + ++LastITMI->getIterator()); + + Modified = true; + ++NumITs; + } else if (getVPTInstrPredicate(*MI, PredReg) != ARMVCC::None) { + MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2VPST)); + MachineInstr *LastITMI = MI; + MachineBasicBlock::iterator InsertPos = MIB.getInstr(); - // Last instruction in IT block kills ITSTATE. - LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill(); + unsigned Mask = 8U; + //TODO: Try to merge compatible VPST blocks. + MIB.addImm(Mask); - // Finalize the bundle. - finalizeBundle(MBB, InsertPos.getInstrIterator(), - ++LastITMI->getIterator()); + // Finalize the bundle. + finalizeBundle(MBB, InsertPos.getInstrIterator(), + ++LastITMI->getIterator()); - Modified = true; - ++NumITs; + ++MBBI; + Modified = true; + } else { + ++MBBI; + continue; + } } return Modified; diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/llvm/lib/Target/ARM/Thumb2InstrInfo.h --- a/llvm/lib/Target/ARM/Thumb2InstrInfo.h +++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.h @@ -68,6 +68,12 @@ /// to llvm::getInstrPredicate except it returns AL for conditional branch /// instructions which are "predicated", but are not in IT blocks. ARMCC::CondCodes getITInstrPredicate(const MachineInstr &MI, unsigned &PredReg); + +// getVPTInstrPredicate: VPT analogue of that, plus a helper function +// corresponding to MachineInstr::findFirstPredOperandIdx. 
+int findFirstVPTPredOperandIdx(const MachineInstr &MI); +ARMVCC::VPTCodes getVPTInstrPredicate(const MachineInstr &MI, + unsigned &PredReg); } #endif diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp --- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -690,3 +690,28 @@ return ARMCC::AL; return getInstrPredicate(MI, PredReg); } + +int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) { + const MCInstrDesc &MCID = MI.getDesc(); + + if (!MCID.OpInfo) + return -1; + + for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) + if (ARM::isVpred(MCID.OpInfo[i].OperandType)) + return i; + + return -1; +} + +ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI, + unsigned &PredReg) { + int PIdx = findFirstVPTPredOperandIdx(MI); + if (PIdx == -1) { + PredReg = 0; + return ARMVCC::None; + } + + PredReg = MI.getOperand(PIdx+1).getReg(); + return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm(); +}