diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2519,8 +2519,8 @@
       const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
       const TargetRegisterClass &RC = AArch64::GPR64RegClass;
       unsigned Size = TRI->getSpillSize(RC);
-      unsigned Align = TRI->getSpillAlignment(RC);
-      int FI = MFI.CreateStackObject(Size, Align, false);
+      Align Alignment = TRI->getSpillAlign(RC);
+      int FI = MFI.CreateStackObject(Size, Alignment, false);
       RS->addScavengingFrameIndex(FI);
       LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                         << " as the emergency spill slot.\n");
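Note: the hunk above leans on the dedicated llvm::Align type instead of a raw unsigned — TargetRegisterInfo::getSpillAlign returns an Align and MachineFrameInfo::CreateStackObject accepts it directly. As a rough, standalone sketch of the invariants the type carries (not part of the patch; assumes only llvm/Support/Alignment.h, and the function name is illustrative):

#include "llvm/Support/Alignment.h"
#include <cassert>

// Illustrative only: Align always holds a power of two and defaults to 1 byte.
static void alignBasicsSketch() {
  llvm::Align A(16);                       // asserts if the value is not a power of two
  assert(A.value() == 16);                 // explicit round-trip back to an integer
  assert(llvm::Align() == llvm::Align(1)); // default-constructed Align is 1 byte
  assert(A >= llvm::Align(8));             // Align values compare by magnitude
}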
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3319,9 +3319,9 @@
       return LowerFixedLengthVectorStoreToSVE(Op, DAG);
 
     unsigned AS = StoreNode->getAddressSpace();
-    unsigned Align = StoreNode->getAlignment();
-    if (Align < MemVT.getStoreSize() &&
-        !allowsMisalignedMemoryAccesses(MemVT, AS, Align,
+    Align Alignment = StoreNode->getAlign();
+    if (Alignment < MemVT.getStoreSize() &&
+        !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
                                         StoreNode->getMemOperand()->getFlags(),
                                         nullptr)) {
       return scalarizeVectorStore(StoreNode, DAG);
@@ -4428,9 +4428,9 @@
              "Only scalable vectors can be passed indirectly");
       MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
       Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
-      unsigned Align = DAG.getDataLayout().getPrefTypeAlignment(Ty);
+      Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
       int FI = MFI.CreateStackObject(
-          VA.getValVT().getStoreSize().getKnownMinSize(), Align, false);
+          VA.getValVT().getStoreSize().getKnownMinSize(), Alignment, false);
       MFI.setStackID(FI, TargetStackID::SVEVector);
 
       SDValue SpillSlot = DAG.getFrameIndex(
@@ -6096,7 +6096,7 @@
   SDLoc DL(Op);
   SDValue Chain = Op.getOperand(0);
   SDValue Addr = Op.getOperand(1);
-  unsigned Align = Op.getConstantOperandVal(3);
+  MaybeAlign Align(Op.getConstantOperandVal(3));
   unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
   auto PtrVT = getPointerTy(DAG.getDataLayout());
   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
@@ -6105,12 +6105,11 @@
   Chain = VAList.getValue(1);
   VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
 
-  if (Align > MinSlotSize) {
-    assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2");
+  if (Align && *Align > MinSlotSize) {
     VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
-                         DAG.getConstant(Align - 1, DL, PtrVT));
+                         DAG.getConstant(Align->value() - 1, DL, PtrVT));
     VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
-                         DAG.getConstant(-(int64_t)Align, DL, PtrVT));
+                         DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
   }
 
   Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
@@ -9111,7 +9110,7 @@
   SDNode *Node = Op.getNode();
   SDValue Chain = Op.getOperand(0);
   SDValue Size = Op.getOperand(1);
-  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+  MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
   EVT VT = Node->getValueType(0);
 
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
@@ -9121,7 +9120,7 @@
     SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
     if (Align)
       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
-                       DAG.getConstant(-(uint64_t)Align, dl, VT));
+                       DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
     Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
     SDValue Ops[2] = {SP, Chain};
     return DAG.getMergeValues(Ops, dl);
@@ -9136,7 +9135,7 @@
   SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
   if (Align)
     SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
-                     DAG.getConstant(-(uint64_t)Align, dl, VT));
+                     DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
   Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
 
   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
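In the LowerVAARG and LowerDYNAMIC_STACKALLOC hunks above, the raw alignment operand is wrapped in MaybeAlign: constructing it from 0 yields an empty value, which is why the explicit power-of-two assert can go away and "if (Align)" now reads as "an over-alignment was actually requested". A small sketch of that behaviour together with the add/mask rounding the lowering emits (not part of the patch; roundUpSketch is an illustrative helper, not an LLVM API):

#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

// Illustrative helper mirroring the ISD::ADD / ISD::AND sequence in the hunks.
static uint64_t roundUpSketch(uint64_t Addr, llvm::MaybeAlign MA) {
  if (!MA)                    // MaybeAlign(0) is empty: no extra alignment asked for
    return Addr;
  uint64_t A = MA->value();   // guaranteed to be a power of two
  return (Addr + A - 1) & ~(A - 1);
}

static void maybeAlignSketch() {
  assert(!llvm::MaybeAlign(0));                // zero decays to "unspecified"
  assert(llvm::MaybeAlign(16)->value() == 16);
  assert(roundUpSketch(20, llvm::MaybeAlign(16)) == 32);
}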
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -2144,8 +2144,9 @@
       LLVM_DEBUG(dbgs() << "Reserving emergency spill slot\n");
       const TargetRegisterClass &RC = ARM::GPRRegClass;
       unsigned Size = TRI->getSpillSize(RC);
-      unsigned Align = TRI->getSpillAlignment(RC);
-      RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
+      Align Alignment = TRI->getSpillAlign(RC);
+      RS->addScavengingFrameIndex(
+          MFI.CreateStackObject(Size, Alignment, false));
     }
   }
 }
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -1726,7 +1726,7 @@
   EVT LoadedVT;
   unsigned Opcode = 0;
   bool isSExtLd, isPre;
-  unsigned Align;
+  Align Alignment;
   ARMVCC::VPTCodes Pred;
   SDValue PredReg;
   SDValue Chain, Base, Offset;
@@ -1742,7 +1742,7 @@
     Chain = LD->getChain();
     Base = LD->getBasePtr();
     Offset = LD->getOffset();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
     isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
     Pred = ARMVCC::None;
@@ -1758,7 +1758,7 @@
     Chain = LD->getChain();
     Base = LD->getBasePtr();
    Offset = LD->getOffset();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
     isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
     Pred = ARMVCC::Then;
@@ -1772,7 +1772,7 @@
   bool CanChangeType = Subtarget->isLittle() && !isa<MaskedLoadSDNode>(N);
 
   SDValue NewOffset;
-  if (Align >= 2 && LoadedVT == MVT::v4i16 &&
+  if (Alignment >= Align(2) && LoadedVT == MVT::v4i16 &&
       SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1)) {
     if (isSExtLd)
       Opcode = isPre ? ARM::MVE_VLDRHS32_pre : ARM::MVE_VLDRHS32_post;
@@ -1790,12 +1790,12 @@
       Opcode = isPre ? ARM::MVE_VLDRBS32_pre : ARM::MVE_VLDRBS32_post;
     else
       Opcode = isPre ? ARM::MVE_VLDRBU32_pre : ARM::MVE_VLDRBU32_post;
-  } else if (Align >= 4 &&
+  } else if (Alignment >= Align(4) &&
              (CanChangeType || LoadedVT == MVT::v4i32 ||
               LoadedVT == MVT::v4f32) &&
             SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 2))
     Opcode = isPre ? ARM::MVE_VLDRWU32_pre : ARM::MVE_VLDRWU32_post;
-  else if (Align >= 2 &&
+  else if (Alignment >= Align(2) &&
            (CanChangeType || LoadedVT == MVT::v8i16 ||
             LoadedVT == MVT::v8f16) &&
           SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1))
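The MVE indexed-load selection above now compares the wrapped type directly (Alignment >= Align(2)); the ARMISelLowering hunks that follow keep comparing against a plain byte count (Alignment >= 2), which the Alignment header of this era also allows. A quick sketch of the two spellings (not part of the patch; the function name is illustrative, and the byte-count overloads were later deprecated, so this mirrors the API as used here rather than current LLVM):

#include "llvm/Support/Alignment.h"

// Illustrative only: both forms ask "is this at least a 4-byte alignment?".
static bool atLeastFourSketch(llvm::Align Alignment) {
  bool ViaAlign = Alignment >= llvm::Align(4); // compare two Align values
  bool ViaBytes = Alignment >= 4;              // compare against a raw byte count
  return ViaAlign && ViaBytes;                 // the two spellings always agree
}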
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16725,7 +16725,7 @@
   return false;
 }
 
-static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, unsigned Align,
+static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
                                       bool isSEXTLoad, bool IsMasked, bool isLE,
                                       SDValue &Base, SDValue &Offset,
                                       bool &isInc, SelectionDAG &DAG) {
@@ -16760,16 +16760,16 @@
     // (in BE/masked) type.
     Base = Ptr->getOperand(0);
     if (VT == MVT::v4i16) {
-      if (Align >= 2 && IsInRange(RHSC, 0x80, 2))
+      if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2))
         return true;
     } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
       if (IsInRange(RHSC, 0x80, 1))
         return true;
-    } else if (Align >= 4 &&
+    } else if (Alignment >= 4 &&
               (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
               IsInRange(RHSC, 0x80, 4))
       return true;
-    else if (Align >= 2 &&
+    else if (Alignment >= 2 &&
             (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
             IsInRange(RHSC, 0x80, 2))
       return true;
@@ -16791,28 +16791,28 @@
   EVT VT;
   SDValue Ptr;
-  unsigned Align;
+  Align Alignment;
   bool isSEXTLoad = false;
   bool IsMasked = false;
 
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
     VT = ST->getMemoryVT();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
     IsMasked = true;
   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
     VT = ST->getMemoryVT();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
     IsMasked = true;
   } else
     return false;
@@ -16821,9 +16821,9 @@
   bool isLegal = false;
   if (VT.isVector())
     isLegal = Subtarget->hasMVEIntegerOps() &&
-              getMVEIndexedAddressParts(Ptr.getNode(), VT, Align, isSEXTLoad,
-                                        IsMasked, Subtarget->isLittle(), Base,
-                                        Offset, isInc, DAG);
+              getMVEIndexedAddressParts(
+                  Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked,
+                  Subtarget->isLittle(), Base, Offset, isInc, DAG);
   else {
     if (Subtarget->isThumb2())
       isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
@@ -16849,31 +16849,31 @@
                                                   SelectionDAG &DAG) const {
   EVT VT;
   SDValue Ptr;
-  unsigned Align;
+  Align Alignment;
   bool isSEXTLoad = false, isNonExt;
   bool IsMasked = false;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     VT = LD->getMemoryVT();
     Ptr = LD->getBasePtr();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     VT = ST->getMemoryVT();
     Ptr = ST->getBasePtr();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
     isNonExt = !ST->isTruncatingStore();
   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
     VT = LD->getMemoryVT();
     Ptr = LD->getBasePtr();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
     IsMasked = true;
   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
     VT = ST->getMemoryVT();
     Ptr = ST->getBasePtr();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
     isNonExt = !ST->isTruncatingStore();
     IsMasked = true;
   } else
@@ -16899,7 +16899,7 @@
   bool isLegal = false;
   if (VT.isVector())
     isLegal = Subtarget->hasMVEIntegerOps() &&
-              getMVEIndexedAddressParts(Op, VT, Align, isSEXTLoad, IsMasked,
+              getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked,
                                         Subtarget->isLittle(), Base, Offset,
                                         isInc, DAG);
   else {
@@ -17657,13 +17657,14 @@
 
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
           "no-stack-arg-probe")) {
-    unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+    MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
     SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
     Chain = SP.getValue(1);
     SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
     if (Align)
-      SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
-                       DAG.getConstant(-(uint64_t)Align, DL, MVT::i32));
+      SP =
+          DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
+                      DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32));
     Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
     SDValue Ops[2] = { SP, Chain };
     return DAG.getMergeValues(Ops, DL);
diff --git a/llvm/lib/Target/Hexagon/HexagonVExtract.cpp b/llvm/lib/Target/Hexagon/HexagonVExtract.cpp
--- a/llvm/lib/Target/Hexagon/HexagonVExtract.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVExtract.cpp
@@ -107,7 +107,7 @@
   Register AR =
       MF.getInfo<HexagonMachineFunctionInfo>()->getStackAlignBaseVReg();
   std::map<unsigned, SmallVector<MachineInstr *, 4>> VExtractMap;
-  unsigned MaxAlign = 0;
+  Align MaxAlign;
   bool Changed = false;
 
   for (MachineBasicBlock &MBB : MF) {
@@ -137,14 +137,14 @@
         continue;
 
       const auto &VecRC = *MRI.getRegClass(VecR);
-      unsigned Align = HRI.getSpillAlignment(VecRC);
-      MaxAlign = std::max(MaxAlign, Align);
+      Align Alignment = HRI.getSpillAlign(VecRC);
+      MaxAlign = std::max(MaxAlign, Alignment);
 
       // Make sure this is not a spill slot: spill slots cannot be aligned
       // if there are variable-sized objects on the stack. They must be
       // accessible via FP (which is not aligned), because SP is unknown,
       // and AP may not be available at the location of the load/store.
-      int FI = MFI.CreateStackObject(HRI.getSpillSize(VecRC), Align,
-                                     /*isSpillSlot*/false);
+      int FI = MFI.CreateStackObject(HRI.getSpillSize(VecRC), Alignment,
+                                     /*isSpillSlot*/ false);
       MachineInstr *DefI = MRI.getVRegDef(VecR);
       MachineBasicBlock::iterator At = std::next(DefI->getIterator());
@@ -184,7 +184,7 @@
     assert(AlignaI->getOpcode() == Hexagon::PS_aligna);
     MachineOperand &Op = AlignaI->getOperand(1);
     if (MaxAlign > Op.getImm())
-      Op.setImm(MaxAlign);
+      Op.setImm(MaxAlign.value());
   }
 
   return Changed;
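HexagonVExtract above folds the per-register-class spill alignments into one Align with std::max (Align is totally ordered) and only converts back to an integer at the final setImm call. A compact sketch of that accumulation pattern (not part of the patch; the function name and ArrayRef input are illustrative):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Alignment.h"
#include <algorithm>

// Illustrative only: keep the largest alignment seen, starting from 1 byte.
static llvm::Align maxAlignSketch(llvm::ArrayRef<llvm::Align> Seen) {
  llvm::Align Max;            // default Align is 1 byte
  for (llvm::Align A : Seen)
    Max = std::max(Max, A);   // comparisons are on the underlying byte count
  return Max;                 // callers would feed Max.value() to setImm(...)
}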
diff --git a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
--- a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -50,7 +50,7 @@
     const BlockAddress *BlockAddr = nullptr;
     const char *ES = nullptr;
     int JT = -1;
-    unsigned Align = 0; // CP alignment.
+    Align Alignment;    // CP alignment.
 
     MSP430ISelAddressMode() = default;
 
@@ -74,12 +74,12 @@
     } else if (CP) {
       errs() << " CP ";
       CP->dump();
-      errs() << " Align" << Align << '\n';
+      errs() << " Align" << Alignment.value() << '\n';
     } else if (ES) {
       errs() << "ES ";
       errs() << ES << '\n';
     } else if (JT != -1)
-      errs() << " JT" << JT << " Align" << Align << '\n';
+      errs() << " JT" << JT << " Align" << Alignment.value() << '\n';
   }
 #endif
   };
@@ -146,7 +146,7 @@
     //AM.SymbolFlags = G->getTargetFlags();
   } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
     AM.CP = CP->getConstVal();
-    AM.Align = CP->getAlign().value();
+    AM.Alignment = CP->getAlign();
     AM.Disp += CP->getOffset();
     //AM.SymbolFlags = CP->getTargetFlags();
   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
@@ -263,8 +263,8 @@
                                          MVT::i16, AM.Disp, 0/*AM.SymbolFlags*/);
   else if (AM.CP)
-    Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i16, Align(AM.Align),
-                                         AM.Disp, 0 /*AM.SymbolFlags*/);
+    Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i16, AM.Alignment, AM.Disp,
+                                         0 /*AM.SymbolFlags*/);
   else if (AM.ES)
     Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i16,
                                            0/*AM.SymbolFlags*/);
   else if (AM.JT != -1)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -2302,10 +2302,10 @@
   MemSDNode *MemSD = cast<MemSDNode>(N);
   const DataLayout &TD = DAG.getDataLayout();
 
-  unsigned Align = MemSD->getAlignment();
-  unsigned PrefAlign =
-      TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
-  if (Align < PrefAlign) {
+  Align Alignment = MemSD->getAlign();
+  Align PrefAlign =
+      TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
+  if (Alignment < PrefAlign) {
     // This store is not sufficiently aligned, so bail out and let this vector
     // store be scalarized. Note that we may still be able to emit smaller
     // vector stores. For example, if we are storing a <4 x float> with an
@@ -4791,11 +4791,10 @@
 
   LoadSDNode *LD = cast<LoadSDNode>(N);
 
-  unsigned Align = LD->getAlignment();
+  Align Alignment = LD->getAlign();
   auto &TD = DAG.getDataLayout();
-  unsigned PrefAlign =
-      TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
-  if (Align < PrefAlign) {
+  Align PrefAlign = TD.getPrefTypeAlign(ResVT.getTypeForEVT(*DAG.getContext()));
+  if (Alignment < PrefAlign) {
     // This load is not sufficiently aligned, so bail out and let this vector
     // load be scalarized. Note that we may still be able to emit smaller
     // vector loads. For example, if we are loading a <4 x float> with an