diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -343,7 +343,7 @@
     addQRTypeForNEON(MVT::v8bf16);
   }
 
-  if (Subtarget->hasSVE() || Subtarget->hasSME()) {
+  if (Subtarget->hasSVEorSME()) {
     // Add legal sve predicate types
     addRegisterClass(MVT::nxv1i1, &AArch64::PPRRegClass);
     addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
@@ -1155,7 +1155,7 @@
 
   // FIXME: Move lowering for more nodes here if those are common between
   // SVE and SME.
-  if (Subtarget->hasSVE() || Subtarget->hasSME()) {
+  if (Subtarget->hasSVEorSME()) {
     for (auto VT : {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1,
                     MVT::nxv1i1}) {
       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -3535,8 +3535,7 @@
   // Copy a Predicate register by ORRing with itself.
   if (AArch64::PPRRegClass.contains(DestReg) &&
       AArch64::PPRRegClass.contains(SrcReg)) {
-    assert((Subtarget.hasSVE() || Subtarget.hasSME()) &&
-           "Unexpected SVE register.");
+    assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
     BuildMI(MBB, I, DL, get(AArch64::ORR_PPzPP), DestReg)
         .addReg(SrcReg) // Pg
         .addReg(SrcReg)
@@ -3547,8 +3546,7 @@
   // Copy a Z register by ORRing with itself.
   if (AArch64::ZPRRegClass.contains(DestReg) &&
       AArch64::ZPRRegClass.contains(SrcReg)) {
-    assert((Subtarget.hasSVE() || Subtarget.hasSME()) &&
-           "Unexpected SVE register.");
+    assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
     BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ), DestReg)
         .addReg(SrcReg)
         .addReg(SrcReg, getKillRegState(KillSrc));
@@ -3558,8 +3556,7 @@
   // Copy a Z register pair by copying the individual sub-registers.
   if (AArch64::ZPR2RegClass.contains(DestReg) &&
       AArch64::ZPR2RegClass.contains(SrcReg)) {
-    assert((Subtarget.hasSVE() || Subtarget.hasSME()) &&
-           "Unexpected SVE register.");
+    assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1};
     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
                      Indices);
@@ -3569,8 +3566,7 @@
   // Copy a Z register triple by copying the individual sub-registers.
   if (AArch64::ZPR3RegClass.contains(DestReg) &&
       AArch64::ZPR3RegClass.contains(SrcReg)) {
-    assert((Subtarget.hasSVE() || Subtarget.hasSME()) &&
-           "Unexpected SVE register.");
+    assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1,
                                        AArch64::zsub2};
     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
@@ -3581,8 +3577,7 @@
   // Copy a Z register quad by copying the individual sub-registers.
   if (AArch64::ZPR4RegClass.contains(DestReg) &&
       AArch64::ZPR4RegClass.contains(SrcReg)) {
-    assert((Subtarget.hasSVE() || Subtarget.hasSME()) &&
-           "Unexpected SVE register.");
+    assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1,
                                        AArch64::zsub2, AArch64::zsub3};
     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -166,7 +166,7 @@
 // A subset of SVE(2) instructions are legal in Streaming SVE execution mode,
 // they should be enabled if either has been specified.
 def HasSVEorSME
-    : Predicate<"Subtarget->hasSVE() || Subtarget->hasSME()">,
+    : Predicate<"Subtarget->hasSVEorSME()">,
       AssemblerPredicateWithAll<(any_of FeatureSVE, FeatureSME),
                                 "sve or sme">;
 def HasSVE2orSME
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -361,16 +361,20 @@
 
   void mirFileLoaded(MachineFunction &MF) const override;
 
+  bool hasSVEorSME() const { return hasSVE() || hasSME(); }
+
   // Return the known range for the bit length of SVE data registers. A value
   // of 0 means nothing is known about that particular limit beyong what's
   // implied by the architecture.
   unsigned getMaxSVEVectorSizeInBits() const {
-    assert(HasSVE && "Tried to get SVE vector length without SVE support!");
+    assert(hasSVEorSME() &&
+           "Tried to get SVE vector length without SVE support!");
     return MaxSVEVectorSizeInBits;
   }
 
   unsigned getMinSVEVectorSizeInBits() const {
-    assert(HasSVE && "Tried to get SVE vector length without SVE support!");
+    assert(hasSVEorSME() &&
+           "Tried to get SVE vector length without SVE support!");
     return MinSVEVectorSizeInBits;
   }
 
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -459,8 +459,8 @@
 
 bool AArch64Subtarget::forceStreamingCompatibleSVE() const {
   if (ForceStreamingCompatibleSVE) {
-    assert((hasSVE() || hasSME()) && "Expected SVE to be available");
-    return hasSVE() || hasSME();
+    assert(hasSVEorSME() && "Expected SVE to be available");
+    return hasSVEorSME();
   }
   return false;
 }
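The functional change is the new AArch64Subtarget::hasSVEorSME() helper: every open-coded hasSVE() || hasSME() check above collapses into a single call, and the SVE vector-length assertions are relaxed so that SME-only (streaming) configurations are also accepted. As a rough illustration of the pattern only, the standalone sketch below uses a hypothetical MockSubtarget class (not LLVM code); the helper and the relaxed assertion mirror the patch, while the 128-bit return value merely stands in for whatever minimum vector length a real subtarget was configured with.

// Standalone sketch, assuming a hypothetical MockSubtarget in place of
// AArch64Subtarget. Only the predicate logic mirrors the patch.
#include <cassert>
#include <iostream>

class MockSubtarget {
  bool HasSVE = false;
  bool HasSME = false;

public:
  MockSubtarget(bool SVE, bool SME) : HasSVE(SVE), HasSME(SME) {}

  bool hasSVE() const { return HasSVE; }
  bool hasSME() const { return HasSME; }

  // The new helper: scalable-vector state and a subset of SVE(2) operations
  // are usable when either SVE or SME (Streaming SVE mode) is present.
  bool hasSVEorSME() const { return hasSVE() || hasSME(); }

  unsigned getMinSVEVectorSizeInBits() const {
    // Relaxed assertion from the patch: querying the SVE vector length is now
    // also valid for SME-only configurations.
    assert(hasSVEorSME() &&
           "Tried to get SVE vector length without SVE support!");
    return 128; // Architectural minimum; stand-in for the configured value.
  }
};

int main() {
  MockSubtarget SMEOnly(/*SVE=*/false, /*SME=*/true);
  if (SMEOnly.hasSVEorSME())
    std::cout << "min SVE bits: " << SMEOnly.getMinSVEVectorSizeInBits() << "\n";
  return 0;
}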