diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
--- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -347,7 +347,7 @@
   static void enumeration(yaml::IO &IO, TargetStackID::Value &ID) {
     IO.enumCase(ID, "default", TargetStackID::Default);
     IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
-    IO.enumCase(ID, "sve-vec", TargetStackID::SVEVector);
+    IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
     IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
   }
 };
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -27,7 +27,7 @@
 enum Value {
   Default = 0,
   SGPRSpill = 1,
-  SVEVector = 2,
+  ScalableVector = 2,
   NoAlloc = 255
 };
 }
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -107,7 +107,7 @@
     default:
      return false;
    case TargetStackID::Default:
-    case TargetStackID::SVEVector:
+    case TargetStackID::ScalableVector:
    case TargetStackID::NoAlloc:
      return true;
    }
@@ -116,7 +116,7 @@
   bool isStackIdSafeForLocalArea(unsigned StackId) const override {
     // We don't support putting SVE objects into the pre-allocated local
     // frame block at the moment.
-    return StackId != TargetStackID::SVEVector;
+    return StackId != TargetStackID::ScalableVector;
   }
 
   void
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -249,7 +249,7 @@
 
 TargetStackID::Value
 AArch64FrameLowering::getStackIDForScalableVectors() const {
-  return TargetStackID::SVEVector;
+  return TargetStackID::ScalableVector;
 }
 
 /// Returns the size of the fixed object area (allocated next to sp on entry)
@@ -496,7 +496,7 @@
       continue;
 
     StackOffset Offset;
-    if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::SVEVector) {
+    if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector) {
       AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
       Offset =
           StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) -
@@ -1856,7 +1856,7 @@
   const auto &MFI = MF.getFrameInfo();
   int64_t ObjectOffset = MFI.getObjectOffset(FI);
   bool isFixed = MFI.isFixedObjectIndex(FI);
-  bool isSVE = MFI.getStackID(FI) == TargetStackID::SVEVector;
+  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
   return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                      PreferFP, ForSimm);
 }
@@ -2412,7 +2412,7 @@
     // Update the StackIDs of the SVE stack slots.
     MachineFrameInfo &MFI = MF.getFrameInfo();
     if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
-      MFI.setStackID(RPI.FrameIdx, TargetStackID::SVEVector);
+      MFI.setStackID(RPI.FrameIdx, TargetStackID::ScalableVector);
   }
 
   return true;
@@ -2761,7 +2761,7 @@
 #ifndef NDEBUG
   // First process all fixed stack objects.
   for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
-    assert(MFI.getStackID(I) != TargetStackID::SVEVector &&
+    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
            "SVE vectors should never be passed on the stack by value, only by "
            "reference.");
 #endif
@@ -2791,7 +2791,7 @@
   SmallVector<int, 8> ObjectsToAllocate;
   for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
     unsigned StackID = MFI.getStackID(I);
-    if (StackID != TargetStackID::SVEVector)
+    if (StackID != TargetStackID::ScalableVector)
       continue;
     if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
       continue;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5393,7 +5393,7 @@
     Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
     Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
     int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
-    MFI.setStackID(FI, TargetStackID::SVEVector);
+    MFI.setStackID(FI, TargetStackID::ScalableVector);
     MachinePointerInfo MPI =
         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -3291,7 +3291,7 @@
     else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
       Opc = AArch64::STR_PXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 4:
@@ -3335,7 +3335,7 @@
     } else if (AArch64::ZPRRegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
       Opc = AArch64::STR_ZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 24:
@@ -3357,7 +3357,7 @@
     } else if (AArch64::ZPR2RegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
       Opc = AArch64::STR_ZZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 48:
@@ -3368,7 +3368,7 @@
     } else if (AArch64::ZPR3RegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
       Opc = AArch64::STR_ZZZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 64:
@@ -3379,7 +3379,7 @@
     } else if (AArch64::ZPR4RegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
       Opc = AArch64::STR_ZZZZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   }
@@ -3445,7 +3445,7 @@
     else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
       Opc = AArch64::LDR_PXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 4:
@@ -3489,7 +3489,7 @@
     } else if (AArch64::ZPRRegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
       Opc = AArch64::LDR_ZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 24:
@@ -3511,7 +3511,7 @@
     } else if (AArch64::ZPR2RegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
       Opc = AArch64::LDR_ZZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 48:
@@ -3522,7 +3522,7 @@
     } else if (AArch64::ZPR3RegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
       Opc = AArch64::LDR_ZZZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   case 64:
@@ -3533,7 +3533,7 @@
     } else if (AArch64::ZPR4RegClass.hasSubClassEq(RC)) {
       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
       Opc = AArch64::LDR_ZZZZXI;
-      StackID = TargetStackID::SVEVector;
+      StackID = TargetStackID::ScalableVector;
     }
     break;
   }
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -783,7 +783,7 @@
   case TargetStackID::NoAlloc:
   case TargetStackID::SGPRSpill:
     return true;
-  case TargetStackID::SVEVector:
+  case TargetStackID::ScalableVector:
     return false;
   }
   llvm_unreachable("Invalid TargetStackID::Value");
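
Note: the MIRYamlMapping.h hunk also renames the user-visible MIR serialization
string from "sve-vec" to "scalable-vector", so any MIR test that spells out a
stack object with the old stack ID needs the same one-word substitution. A
minimal sketch of the affected syntax (hypothetical test fragment; the id,
size, and alignment values are illustrative only):

  stack:
    - { id: 0, name: '', size: 16, alignment: 16, stack-id: scalable-vector }

Before this change the same entry would have read "stack-id: sve-vec"; MIR
files using the old spelling will fail to parse once this lands.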