diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -2136,11 +2136,6 @@
   /// Compute the default alignment value for the given type.
   Align getEVTAlign(EVT MemoryVT) const;
 
-  /// Compute the default alignment value for the given type.
-  /// FIXME: Remove once transition to Align is over.
-  inline unsigned getEVTAlignment(EVT MemoryVT) const {
-    return getEVTAlign(MemoryVT).value();
-  }
 
   /// Test whether the given value is a constant int or similar node.
   SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -759,11 +759,6 @@
     setArgOperand(ARG_DEST, Ptr);
   }
 
-  /// FIXME: Remove this function once transition to Align is over.
-  /// Use the version that takes MaybeAlign instead of this one.
-  void setDestAlignment(unsigned Alignment) {
-    setDestAlignment(MaybeAlign(Alignment));
-  }
   void setDestAlignment(MaybeAlign Alignment) {
     removeParamAttr(ARG_DEST, Attribute::Alignment);
     if (Alignment)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18159,7 +18159,7 @@
   while (NumConsecutiveStores >= 2) {
     LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
     unsigned FirstStoreAS = FirstInChain->getAddressSpace();
-    unsigned FirstStoreAlign = FirstInChain->getAlignment();
+    Align FirstStoreAlign = FirstInChain->getAlign();
     unsigned LastLegalType = 1;
     unsigned LastLegalVectorType = 1;
     bool LastIntegerTrunc = false;
@@ -18247,7 +18247,7 @@
       unsigned NumSkip = 1;
       while ((NumSkip < NumConsecutiveStores) &&
              (NumSkip < FirstZeroAfterNonZero) &&
-             (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
+             (StoreNodes[NumSkip].MemNode->getAlign() <= FirstStoreAlign))
         NumSkip++;
 
       StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
@@ -18286,7 +18286,7 @@
   while (NumConsecutiveStores >= 2) {
     LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
     unsigned FirstStoreAS = FirstInChain->getAddressSpace();
-    unsigned FirstStoreAlign = FirstInChain->getAlignment();
+    Align FirstStoreAlign = FirstInChain->getAlign();
     unsigned NumStoresToMerge = 1;
     for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
       // Find a legal type for the vector store.
@@ -18317,7 +18317,7 @@
       // improved. Drop as many candidates as we can here.
       unsigned NumSkip = 1;
      while ((NumSkip < NumConsecutiveStores) &&
-             (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
+             (StoreNodes[NumSkip].MemNode->getAlign() <= FirstStoreAlign))
         NumSkip++;
 
       StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -6303,7 +6303,7 @@
   // Allow wider loads if they are sufficiently aligned to avoid memory faults
   // and if the original load is simple.
   unsigned LdAlign =
-      (!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlignment();
+      (!LD->isSimple() || LdVT.isScalableVector()) ? 0 : LD->getAlign().value();
 
   // Find the vector type that can load from.
   Optional<EVT> FirstVT =
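The rest of this patch repeats the same substitution across targets: raw unsigned alignments become llvm::Align values from llvm/Support/Alignment.h. As a reference point for reading those hunks, here is a small standalone sketch, not part of the patch, of the Align behaviour the new comparisons rely on; it assumes nothing beyond the Align constructor, ordered comparisons, and .value(), which the hunks above already use.

    // Sketch only, not part of the patch: the Align semantics assumed by the
    // DAGCombiner and LegalizeVectorTypes changes above.
    #include "llvm/Support/Alignment.h"
    #include <cassert>

    using llvm::Align;

    int main() {
      // Align always holds a non-zero power of two, so the old "0 means
      // unknown" convention disappears; the smallest value is Align(1).
      Align FirstStoreAlign(16);
      Align OtherStoreAlign(4);

      // Ordered comparisons between two Align values compare byte values,
      // so getAlign() <= FirstStoreAlign reads like the old unsigned check.
      assert(OtherStoreAlign <= FirstStoreAlign);

      // When a raw byte count is needed, .value() converts back to an
      // integer, as done for LdAlign above.
      assert(FirstStoreAlign.value() == 16);
      return 0;
    }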
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -149,7 +149,7 @@
   CallInst *CI = createCallHelper(TheFn, Ops, this);
 
   if (Align)
-    cast<MemSetInst>(CI)->setDestAlignment(Align->value());
+    cast<MemSetInst>(CI)->setDestAlignment(*Align);
 
   // Set the TBAA info if present.
   if (TBAATag)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14106,7 +14106,7 @@
       !cast<LoadSDNode>(N0)->isVolatile()) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
-                               LN0->getPointerInfo(), LN0->getAlignment(),
+                               LN0->getPointerInfo(), LN0->getAlign(),
                                LN0->getMemOperand()->getFlags());
 
     // Make sure successors of the original load stay after it by updating them
@@ -16402,7 +16402,7 @@
 static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
                                SDValue SplatVal, unsigned NumVecElts) {
   assert(!St.isTruncatingStore() && "cannot split truncating vector store");
-  unsigned OrigAlignment = St.getAlignment();
+  Align OrigAlignment = St.getAlign();
   unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;
 
   // Create scalar stores. This is at least as good as the code sequence for a
@@ -16427,7 +16427,7 @@
   unsigned Offset = EltOffset;
   while (--NumVecElts) {
-    unsigned Alignment = MinAlign(OrigAlignment, Offset);
+    Align Alignment = commonAlignment(OrigAlignment, Offset);
     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
                                     DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
@@ -16761,8 +16761,8 @@
   // extensions can use this to mark that it does not want splitting to happen
   // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of
   // eliminating alignment hazards is only 1 in 8 for alignment of 2.
-  if (VT.getSizeInBits() != 128 || S->getAlignment() >= 16 ||
-      S->getAlignment() <= 2)
+  if (VT.getSizeInBits() != 128 || S->getAlign() >= Align(16) ||
+      S->getAlign() <= Align(2))
     return SDValue();
 
   // If we get a splat of a scalar convert this vector store to a store of
@@ -16783,11 +16783,11 @@
   SDValue BasePtr = S->getBasePtr();
   SDValue NewST1 = DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
-                   S->getAlignment(), S->getMemOperand()->getFlags());
+                   S->getAlign(), S->getMemOperand()->getFlags());
   SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
                                   DAG.getConstant(8, DL, MVT::i64));
   return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
-                      S->getPointerInfo(), S->getAlignment(),
+                      S->getPointerInfo(), S->getAlign(),
                       S->getMemOperand()->getFlags());
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -2847,26 +2847,15 @@
 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
   auto Ld = cast<LoadSDNode>(N);
 
-  return Ld->getAlignment() >= 4 &&
-         (
-           (
-             (
-               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
-               Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT
-             )
-             &&
-             !N->isDivergent()
-           )
-           ||
-           (
-             Subtarget->getScalarizeGlobalBehavior() &&
-             Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
-             Ld->isSimple() &&
-             !N->isDivergent() &&
-             static_cast<const SITargetLowering *>(
-               getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)
-           )
-         );
+  return Ld->getAlign() >= Align(4) &&
+         (((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
+            Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
+           !N->isDivergent()) ||
+          (Subtarget->getScalarizeGlobalBehavior() &&
+           Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
+           Ld->isSimple() && !N->isDivergent() &&
+           static_cast<const SITargetLowering *>(getTargetLowering())
+               ->isMemOpHasNoClobberedMemOperand(N)));
 }
 
 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -650,11 +650,11 @@
   unsigned AS = MN->getAddressSpace();
   // Do not shrink an aligned scalar load to sub-dword.
   // Scalar engine cannot do sub-dword loads.
-  if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
+  if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
       (AS == AMDGPUAS::CONSTANT_ADDRESS ||
        AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
-       (isa<LoadSDNode>(N) &&
-        AS == AMDGPUAS::GLOBAL_ADDRESS && MN->isInvariant())) &&
+       (isa<LoadSDNode>(N) && AS == AMDGPUAS::GLOBAL_ADDRESS &&
+        MN->isInvariant())) &&
       AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
     return false;
@@ -1456,8 +1456,8 @@
   std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
 
   unsigned Size = LoMemVT.getStoreSize();
-  unsigned BaseAlign = Load->getAlignment();
-  unsigned HiAlign = MinAlign(BaseAlign, Size);
+  Align BaseAlign = Load->getAlign();
+  Align HiAlign = commonAlignment(BaseAlign, Size);
 
   SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                   Load->getChain(), BasePtr, SrcValue, LoMemVT,
@@ -1495,13 +1495,13 @@
   EVT MemVT = Load->getMemoryVT();
   SDLoc SL(Op);
   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
-  unsigned BaseAlign = Load->getAlignment();
+  Align BaseAlign = Load->getAlign();
   unsigned NumElements = MemVT.getVectorNumElements();
 
   // Widen from vec3 to vec4 when the load is at least 8-byte aligned
   // or 16-byte fully dereferenceable. Otherwise, split the vector load.
   if (NumElements != 3 ||
-      (BaseAlign < 8 &&
+      (BaseAlign < Align(8) &&
        !SrcValue.isDereferenceable(16, *DAG.getContext(), DAG.getDataLayout())))
     return SplitVectorLoad(Op, DAG);
@@ -1548,9 +1548,9 @@
   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
 
   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
-  unsigned BaseAlign = Store->getAlignment();
+  Align BaseAlign = Store->getAlign();
   unsigned Size = LoMemVT.getStoreSize();
-  unsigned HiAlign = MinAlign(BaseAlign, Size);
+  Align HiAlign = commonAlignment(BaseAlign, Size);
 
   SDValue LoStore =
       DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1108,9 +1108,9 @@
     SDValue NewChain = DAG.getNode(AMDGPUISD::DUMMY_CHAIN, DL, MVT::Other, Chain);
     // TODO: can the chain be replaced without creating a new store?
     SDValue NewStore = DAG.getTruncStore(
-        NewChain, DL, Value, Ptr, StoreNode->getPointerInfo(),
-        MemVT, StoreNode->getAlignment(),
-        StoreNode->getMemOperand()->getFlags(), StoreNode->getAAInfo());
+        NewChain, DL, Value, Ptr, StoreNode->getPointerInfo(), MemVT,
+        StoreNode->getAlign(), StoreNode->getMemOperand()->getFlags(),
+        StoreNode->getAAInfo());
     StoreNode = cast<StoreSDNode>(NewStore);
   }
@@ -1362,7 +1362,7 @@
     assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
     SDValue NewLoad = DAG.getExtLoad(
         ISD::EXTLOAD, DL, VT, Chain, Ptr, LoadNode->getPointerInfo(), MemVT,
-        LoadNode->getAlignment(), LoadNode->getMemOperand()->getFlags());
+        LoadNode->getAlign(), LoadNode->getMemOperand()->getFlags());
     SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, NewLoad,
                               DAG.getValueType(MemVT));
@@ -1659,7 +1659,7 @@
   if (LoadNode->getMemoryVT().getScalarType() != MVT::i32 ||
       !ISD::isNON_EXTLoad(LoadNode))
     return SDValue();
 
-  if (LoadNode->getAlignment() < 4)
+  if (LoadNode->getAlign() < Align(4))
     return SDValue();
 
   int ConstantBlock = ConstantAddressBlock(Block);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -8362,7 +8362,7 @@
 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
-  if (Ld->getAlignment() < 4 || Ld->isDivergent())
+  if (Ld->getAlign() < Align(4) || Ld->isDivergent())
     return SDValue();
 
   // FIXME: Constant loads should all be marked invariant.
@@ -8387,14 +8387,11 @@
   // TODO: Drop only high part of range.
   SDValue Ptr = Ld->getBasePtr();
-  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
-                                MVT::i32, SL, Ld->getChain(), Ptr,
-                                Ld->getOffset(),
-                                Ld->getPointerInfo(), MVT::i32,
-                                Ld->getAlignment(),
-                                Ld->getMemOperand()->getFlags(),
-                                Ld->getAAInfo(),
-                                nullptr); // Drop ranges
+  SDValue NewLoad = DAG.getLoad(
+      ISD::UNINDEXED, ISD::NON_EXTLOAD, MVT::i32, SL, Ld->getChain(), Ptr,
+      Ld->getOffset(), Ld->getPointerInfo(), MVT::i32, Ld->getAlign(),
+      Ld->getMemOperand()->getFlags(), Ld->getAAInfo(),
+      nullptr); // Drop ranges
 
   EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
   if (MemVT.isFloatingPoint()) {
@@ -8483,11 +8480,10 @@
   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
          "Custom lowering for non-i32 vectors hasn't been implemented.");
 
-  unsigned Alignment = Load->getAlignment();
+  Align Alignment = Load->getAlign();
   unsigned AS = Load->getAddressSpace();
-  if (Subtarget->hasLDSMisalignedBug() &&
-      AS == AMDGPUAS::FLAT_ADDRESS &&
-      Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
+  if (Subtarget->hasLDSMisalignedBug() && AS == AMDGPUAS::FLAT_ADDRESS &&
+      Alignment.value() < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
     return SplitVectorLoad(Op, DAG);
   }
@@ -8504,7 +8500,7 @@
 
   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
-    if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
+    if (!Op->isDivergent() && Alignment >= Align(4) && NumElements < 32) {
       if (MemVT.isPow2VectorType())
         return SDValue();
       return WidenOrSplitVectorLoad(Op, DAG);
@@ -8520,7 +8516,7 @@
       AS == AMDGPUAS::GLOBAL_ADDRESS) {
     if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
         Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) &&
-        Alignment >= 4 && NumElements < 32) {
+        Alignment >= Align(4) && NumElements < 32) {
       if (MemVT.isPow2VectorType())
         return SDValue();
       return WidenOrSplitVectorLoad(Op, DAG);
@@ -9025,7 +9021,7 @@
   unsigned AS = Store->getAddressSpace();
   if (Subtarget->hasLDSMisalignedBug() && AS == AMDGPUAS::FLAT_ADDRESS &&
-      Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
+      Store->getAlign().value() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
     return SplitVectorStore(Op, DAG);
   }
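Note the two styles visible in the SIISelLowering hunks: thresholds that are compile-time powers of two are written as Align(4) or Align(16), while comparisons against a runtime byte count such as MemVT.getStoreSize() go through .value(), since a store size need not be a power of two. A short illustrative sketch follows; the helper name needsSplit is invented for the example, and everything else is the llvm/Support/Alignment.h API already used in this patch.

    // Sketch only, not part of the patch: power-of-two thresholds can be
    // spelled as Align(N), while runtime byte counts are compared via .value().
    #include "llvm/Support/Alignment.h"
    #include <cassert>
    #include <cstdint>

    using llvm::Align;

    // Hypothetical helper mirroring checks such as
    //   Store->getAlign().value() < VT.getStoreSize()
    static bool needsSplit(Align StoreAlign, uint64_t StoreSizeInBytes) {
      // A store size (e.g. 12 bytes for a v3i32) need not be a power of two,
      // so it cannot be wrapped in Align; compare raw bytes instead.
      return StoreAlign.value() < StoreSizeInBytes;
    }

    int main() {
      assert(Align(2) < Align(4));        // literal power-of-two threshold
      assert(needsSplit(Align(4), 16));   // under-aligned 16-byte store
      assert(!needsSplit(Align(16), 16));
      return 0;
    }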
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -1058,15 +1058,15 @@
         MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
       // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
       // The maximum alignment is equal to the memory size being referenced.
-      unsigned MMOAlign = MemN->getAlignment();
+      llvm::Align MMOAlign = MemN->getAlign();
       unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
-      if (MMOAlign >= MemSize && MemSize > 1)
+      if (MMOAlign.value() >= MemSize && MemSize > 1)
         Alignment = MemSize;
     } else {
       // All other uses of addrmode6 are for intrinsics.  For now just record
       // the raw alignment value; it will be refined later based on the legal
       // alignment operands for the intrinsic.
-      Alignment = MemN->getAlignment();
+      Alignment = MemN->getAlign().value();
     }
 
     Align = CurDAG->getTargetConstant(Alignment, SDLoc(N), MVT::i32);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -5553,7 +5553,7 @@
   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
     return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
-                       Ld->getPointerInfo(), Ld->getAlignment(),
+                       Ld->getPointerInfo(), Ld->getAlign(),
                        Ld->getMemOperand()->getFlags());
 
   llvm_unreachable("Unknown VFP cmp argument!");
@@ -5573,14 +5573,14 @@
     SDValue Ptr = Ld->getBasePtr();
     RetVal1 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
-                          Ld->getAlignment(), Ld->getMemOperand()->getFlags());
+                          Ld->getAlign(), Ld->getMemOperand()->getFlags());
 
     EVT PtrType = Ptr.getValueType();
-    unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
     SDValue NewPtr = DAG.getNode(ISD::ADD, dl, PtrType, Ptr,
                                  DAG.getConstant(4, dl, PtrType));
     RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
-                          Ld->getPointerInfo().getWithOffset(4), NewAlign,
+                          Ld->getPointerInfo().getWithOffset(4),
+                          commonAlignment(Ld->getAlign(), 4),
                           Ld->getMemOperand()->getFlags());
     return;
   }
@@ -9363,15 +9363,15 @@
   // The load already has the right type.
   if (ExtendedTy == LD->getMemoryVT())
     return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
-                       LD->getBasePtr(), LD->getPointerInfo(),
-                       LD->getAlignment(), LD->getMemOperand()->getFlags());
+                       LD->getBasePtr(), LD->getPointerInfo(), LD->getAlign(),
+                       LD->getMemOperand()->getFlags());
 
   // We need to create a zextload/sextload. We cannot just create a load
   // followed by a zext/zext node because LowerMUL is also run during normal
   // operation legalization where we can't create illegal types.
   return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
                         LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
-                        LD->getMemoryVT(), LD->getAlignment(),
+                        LD->getMemoryVT(), LD->getAlign(),
                         LD->getMemOperand()->getFlags());
 }
@@ -14787,14 +14787,14 @@
   SDValue BasePtr = LD->getBasePtr();
   SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
-                               LD->getAlignment(), LD->getMemOperand()->getFlags());
+                               LD->getAlign(), LD->getMemOperand()->getFlags());
 
   SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(4, DL, MVT::i32));
 
   SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
                                LD->getPointerInfo().getWithOffset(4),
-                               std::min(4U, LD->getAlignment()),
+                               commonAlignment(LD->getAlign(), 4),
                                LD->getMemOperand()->getFlags());
 
   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
@@ -15737,7 +15737,7 @@
   // Now, create a _UPD node, taking care of not breaking alignment.
 
   EVT AlignedVecTy = VecTy;
-  unsigned Alignment = MemN->getAlignment();
+  Align Alignment = MemN->getAlign();
 
   // If this is a less-than-standard-aligned load/store, change the type to
   // match the standard alignment.
@@ -15754,10 +15754,8 @@
   // memory type to match the explicit alignment.  That way, we don't
   // generate non-standard-aligned ARMISD::VLDx nodes.
   if (isa<LSBaseSDNode>(N)) {
-    if (Alignment == 0)
-      Alignment = 1;
-    if (Alignment < VecTy.getScalarSizeInBits() / 8) {
-      MVT EltTy = MVT::getIntegerVT(Alignment * 8);
+    if (Alignment.value() < VecTy.getScalarSizeInBits() / 8) {
+      MVT EltTy = MVT::getIntegerVT(Alignment.value() * 8);
       assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
       assert(!isLaneOp && "Unexpected generic load/store lane.");
       unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
@@ -15770,7 +15768,7 @@
     // alignment of the memory type.
     // Intrinsics, however, always get an explicit alignment, set to the
     // alignment of the MMO.
-    Alignment = 1;
+    Alignment = Align(1);
   }
 
   // Create the new updating load/store node.
@@ -15803,7 +15801,7 @@
   }
 
   // For all node types, the alignment operand is always the last one.
-  Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
+  Ops.push_back(DAG.getConstant(Alignment.value(), dl, MVT::i32));
 
   // If this is a non-standard-aligned STORE, the penultimate operand is the
   // stored value.  Bitcast it to the aligned type.
@@ -16274,7 +16272,7 @@
   if (LD && Op.hasOneUse() && LD->isUnindexed() &&
       LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
     SDValue Ops[] = {LD->getOperand(0), LD->getOperand(1),
-                     DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32)};
+                     DAG.getConstant(LD->getAlign().value(), SDLoc(N), MVT::i32)};
     SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
     SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, Ops,
@@ -16376,7 +16374,7 @@
                                  ShuffWide, DAG.getIntPtrConstant(I, DL));
     SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
-                              St->getAlignment(), St->getMemOperand()->getFlags());
+                              St->getAlign(), St->getMemOperand()->getFlags());
     BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
     Chains.push_back(Ch);
@@ -16624,7 +16622,7 @@
     DCI.AddToWorklist(ExtElt.getNode());
     DCI.AddToWorklist(V.getNode());
     return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
-                        St->getPointerInfo(), St->getAlignment(),
+                        St->getPointerInfo(), St->getAlign(),
                         St->getMemOperand()->getFlags(), St->getAAInfo());
   }
@@ -21332,7 +21330,7 @@
     SmallVector<Value *, 2> Ops;
     Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
-    Ops.push_back(Builder.getInt32(LI->getAlignment()));
+    Ops.push_back(Builder.getInt32(LI->getAlign().value()));
 
     return Builder.CreateCall(VldnFunc, Ops, "vldN");
   } else {
@@ -21502,7 +21500,7 @@
     SmallVector<Value *, 6> Ops;
     Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
     append_range(Ops, Shuffles);
-    Ops.push_back(Builder.getInt32(SI->getAlignment()));
+    Ops.push_back(Builder.getInt32(SI->getAlign().value()));
 
     Builder.CreateCall(VstNFunc, Ops);
   } else {
     assert((Factor == 2 || Factor == 4) &&
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -1581,7 +1581,7 @@
 }
 
 bool HexagonDAGToDAGISel::isAlignedMemNode(const MemSDNode *N) const {
-  return N->getAlignment() >= N->getMemoryVT().getStoreSize();
+  return N->getAlign().value() >= N->getMemoryVT().getStoreSize();
 }
 
 bool HexagonDAGToDAGISel::isSmallStackStore(const StoreSDNode *N) const {
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2678,7 +2678,7 @@
     return Op;
 
   // Return if load is aligned or if MemVT is neither i32 nor i64.
-  if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
+  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
       ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
     return SDValue();
@@ -2792,7 +2792,7 @@
     SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy, Val.getOperand(0));
     return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
-                        SD->getPointerInfo(), SD->getAlignment(),
+                        SD->getPointerInfo(), SD->getAlign(),
                         SD->getMemOperand()->getFlags());
   }
@@ -2802,7 +2802,7 @@
   // Lower unaligned integer stores.
   if (!Subtarget.systemSupportsUnalignedAccess() &&
-      (SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
+      (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
     return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
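Several hunks above (the AArch64 splat-store splitting, the ARM i64 load splitting, and the Mips unaligned lowerings) replace MinAlign or std::min with commonAlignment when forming the half of a split access that sits at a byte offset. commonAlignment(A, Offset) is the alignment that can still be guaranteed after stepping a pointer of alignment A by Offset bytes. The sketch below is illustrative only and assumes just that llvm/Support/Alignment.h helper.

    // Sketch only, not part of the patch: what commonAlignment() computes
    // where the old code used MinAlign()/std::min on raw integers.
    #include "llvm/Support/Alignment.h"
    #include <cassert>

    using llvm::Align;
    using llvm::commonAlignment;

    int main() {
      // Splitting an 8-byte-aligned 64-bit access into two 32-bit halves:
      // the half at offset 4 is only guaranteed 4-byte alignment.
      assert(commonAlignment(Align(8), 4) == Align(4));

      // A larger base alignment is still capped by the offset, matching the
      // old MinAlign(OrigAlignment, Offset) result.
      assert(commonAlignment(Align(16), 4) == Align(4));

      // A zero offset keeps the base alignment.
      assert(commonAlignment(Align(8), 0) == Align(8));
      return 0;
    }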
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -1177,13 +1177,13 @@
 
   // i32 load from lower address.
   SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr, MachinePointerInfo(),
-                           Nd.getAlignment(), Nd.getMemOperand()->getFlags());
+                           Nd.getAlign(), Nd.getMemOperand()->getFlags());
 
   // i32 load from higher address.
   Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
   SDValue Hi = DAG.getLoad(
       MVT::i32, DL, Lo.getValue(1), Ptr, MachinePointerInfo(),
-      std::min(Nd.getAlignment(), 4U), Nd.getMemOperand()->getFlags());
+      commonAlignment(Nd.getAlign(), 4), Nd.getMemOperand()->getFlags());
 
   if (!Subtarget.isLittle())
     std::swap(Lo, Hi);
@@ -1212,14 +1212,13 @@
     std::swap(Lo, Hi);
 
   // i32 store to lower address.
-  Chain =
-      DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(), Nd.getAlignment(),
-                   Nd.getMemOperand()->getFlags(), Nd.getAAInfo());
+  Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(), Nd.getAlign(),
+                       Nd.getMemOperand()->getFlags(), Nd.getAAInfo());
 
   // i32 store to higher address.
   Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
   return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
-                      std::min(Nd.getAlignment(), 4U),
+                      commonAlignment(Nd.getAlign(), 4),
                       Nd.getMemOperand()->getFlags(), Nd.getAAInfo());
 }
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -2244,7 +2244,7 @@
   assert(Node->getValueType(0) == MVT::i1 &&
         "Custom lowering for i1 load only");
   SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
-                              LD->getPointerInfo(), LD->getAlignment(),
+                              LD->getPointerInfo(), LD->getAlign(),
                               LD->getMemOperand()->getFlags());
   SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
   // The legalizer (the caller) is expecting two values from the legalized
@@ -2409,7 +2409,7 @@
     Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
     SDValue Result =
         DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
-                          ST->getAlignment(), ST->getMemOperand()->getFlags());
+                          ST->getAlign(), ST->getMemOperand()->getFlags());
     return Result;
   }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2984,15 +2984,15 @@
   bool isLoad = true;
   SDValue Ptr;
   EVT VT;
-  unsigned Alignment;
+  Align Alignment;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
-    Alignment = LD->getAlignment();
+    Alignment = LD->getAlign();
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
     VT = ST->getMemoryVT();
-    Alignment = ST->getAlignment();
+    Alignment = ST->getAlign();
     isLoad = false;
   } else
     return false;
@@ -3036,7 +3036,7 @@
       return false;
   } else {
     // LDU/STU need an address with at least 4-byte alignment.
-    if (Alignment < 4)
+    if (Alignment < Align(4))
       return false;
 
     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
@@ -14158,13 +14158,13 @@
     assert(LD1 && "Input needs to be a LoadSDNode.");
     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
                        LD1->getBasePtr(), LD1->getPointerInfo(),
-                       LD1->getAlignment());
+                       LD1->getAlign());
   }
   if (InputsAreReverseConsecutive) {
     assert(LDL && "Input needs to be a LoadSDNode.");
-    SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
-                               LDL->getBasePtr(), LDL->getPointerInfo(),
-                               LDL->getAlignment());
+    SDValue Load =
+        DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), LDL->getBasePtr(),
+                    LDL->getPointerInfo(), LDL->getAlign());
     SmallVector<int, 16> Ops;
     for (int i = N->getNumOperands() - 1; i >= 0; i--)
       Ops.push_back(i);
@@ -15306,7 +15306,7 @@
     auto MMOFlags =
         LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
     SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
-                                    LD->getPointerInfo(), LD->getAlignment(),
+                                    LD->getPointerInfo(), LD->getAlign(),
                                     MMOFlags, LD->getAAInfo());
     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
@@ -15314,7 +15314,7 @@
     SDValue FloatLoad2 = DAG.getLoad(
         MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
         LD->getPointerInfo().getWithOffset(4),
-        MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
+        commonAlignment(LD->getAlign(), 4), MMOFlags, LD->getAAInfo());
 
     if (LD->isIndexed()) {
       // Note that DAGCombine should re-form any pre-increment load(s) from
@@ -15627,7 +15627,7 @@
       return SDValue();
     SDValue BasePtr = LD->getBasePtr();
     SDValue Lo = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr,
-                             LD->getPointerInfo(), LD->getAlignment());
+                             LD->getPointerInfo(), LD->getAlign());
     Lo = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Lo);
     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                           DAG.getIntPtrConstant(4, dl));
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1472,7 +1472,7 @@
   assert(MMO && "Expected a memory operand.");
 
   // The memory access must have a proper alignment and no index register.
-  if (MemAccess->getAlignment() < StoreSize ||
+  if (MemAccess->getAlign().value() < StoreSize ||
       !MemAccess->getOffset().isUndef())
     return false;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2311,7 +2311,7 @@
       Load->getExtensionType() != ExtType) {
     C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
                            Load->getBasePtr(), Load->getPointerInfo(),
-                           Load->getMemoryVT(), Load->getAlignment(),
+                           Load->getMemoryVT(), Load->getAlign(),
                            Load->getMemOperand()->getFlags());
     // Update the chain uses.
     DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
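One more API note relevant to the PPC hunk above, where "unsigned Alignment;" becomes "Align Alignment;" declared before the branches that assign it: Align has no zero or unknown state (a default-constructed Align is one byte), and the optional flavour is MaybeAlign, which the IntrinsicInst.h change standardizes on. The sketch below is illustrative only, under that assumption about llvm/Support/Alignment.h.

    // Sketch only, not part of the patch: Align has no "unknown" value, while
    // MaybeAlign is the optional variant used where alignment may be absent.
    #include "llvm/Support/Alignment.h"
    #include <cassert>

    using llvm::Align;
    using llvm::MaybeAlign;

    int main() {
      Align Alignment;              // default-constructed Align is one byte
      assert(Alignment == Align(1));

      Alignment = Align(8);         // later assigned, e.g. from LD->getAlign()
      assert(Alignment >= Align(4));

      MaybeAlign Unset;             // "no alignment specified"
      assert(!Unset);

      MaybeAlign MA = Align(16);
      assert(*MA == Align(16));     // dereferencing yields the underlying
                                    // Align, as in setDestAlignment(*Align)
      return 0;
    }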
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -532,7 +532,7 @@
 
     unsigned StoreSize = N->getMemoryVT().getStoreSize();
 
-    if (N->getAlignment() < StoreSize)
+    if (N->getAlign().value() < StoreSize)
      return false;
 
     switch (StoreSize) {
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5087,7 +5087,7 @@
     // If this is an unaligned vector, make sure the target supports folding it.
     auto *Ld = cast<LoadSDNode>(Op.getNode());
     if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
-        Ld->getValueSizeInBits(0) == 128 && Ld->getAlignment() < 16)
+        Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
       return false;
 
     // TODO: If this is a non-temporal load and the target has an instruction
@@ -9130,7 +9130,7 @@
 
   // Don't create 256-bit non-temporal aligned loads without AVX2 as these
   // will lower to regular temporal loads and use the cache.
-  if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
+  if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
       VT.is256BitVector() && !Subtarget.hasInt256())
     return SDValue();
 
@@ -48432,7 +48432,7 @@
   if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
       Ext == ISD::NON_EXTLOAD &&
       ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
-        Ld->getAlignment() >= 16) ||
+        Ld->getAlign() >= Align(16)) ||
       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
                               *Ld->getMemOperand(), &Fast) &&
        !Fast))) {
@@ -48900,7 +48900,7 @@
 
   // Split under-aligned vector non-temporal stores.
   if (St->isNonTemporal() && StVT == VT &&
-      St->getAlignment() < VT.getStoreSize()) {
+      St->getAlign().value() < VT.getStoreSize()) {
     // ZMM/YMM nt-stores - either it can be stored as a series of shorter
     // vectors or the legalizer can scalarize it to use MOVNTI.
     if (VT.is256BitVector() || VT.is512BitVector()) {
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -440,7 +440,7 @@
     }
   }
 
-  if (LD->getAlignment() == 2) {
+  if (LD->getAlign() == Align(2)) {
     SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                                  LD->getPointerInfo(), MVT::i16, Align(2),
                                  LD->getMemOperand()->getFlags());
@@ -495,7 +495,7 @@
   SDValue Value = ST->getValue();
   SDLoc dl(Op);
 
-  if (ST->getAlignment() == 2) {
+  if (ST->getAlign() == Align(2)) {
     SDValue Low = Value;
     SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                                DAG.getConstant(16, dl, MVT::i32));
@@ -939,25 +939,25 @@
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
-    if (N->getAlignment() < 4)
+    if (N->getAlign() < Align(4))
       report_fatal_error("atomic load must be aligned");
     return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                        N->getChain(), N->getBasePtr(), N->getPointerInfo(),
-                       N->getAlignment(), N->getMemOperand()->getFlags(),
+                       N->getAlign(), N->getMemOperand()->getFlags(),
                        N->getAAInfo(), N->getRanges());
   }
   if (N->getMemoryVT() == MVT::i16) {
-    if (N->getAlignment() < 2)
+    if (N->getAlign() < Align(2))
       report_fatal_error("atomic load must be aligned");
     return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                           N->getBasePtr(), N->getPointerInfo(), MVT::i16,
-                          N->getAlignment(), N->getMemOperand()->getFlags(),
+                          N->getAlign(), N->getMemOperand()->getFlags(),
                           N->getAAInfo());
   }
   if (N->getMemoryVT() == MVT::i8)
     return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                           N->getBasePtr(), N->getPointerInfo(), MVT::i8,
-                          N->getAlignment(), N->getMemOperand()->getFlags(),
+                          N->getAlign(), N->getMemOperand()->getFlags(),
                           N->getAAInfo());
   return SDValue();
 }
@@ -970,24 +970,24 @@
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
-    if (N->getAlignment() < 4)
+    if (N->getAlign() < Align(4))
       report_fatal_error("atomic store must be aligned");
     return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
-                        N->getPointerInfo(), N->getAlignment(),
+                        N->getPointerInfo(), N->getAlign(),
                         N->getMemOperand()->getFlags(), N->getAAInfo());
   }
   if (N->getMemoryVT() == MVT::i16) {
-    if (N->getAlignment() < 2)
+    if (N->getAlign() < Align(2))
       report_fatal_error("atomic store must be aligned");
     return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                              N->getBasePtr(), N->getPointerInfo(), MVT::i16,
-                             N->getAlignment(), N->getMemOperand()->getFlags(),
+                             N->getAlign(), N->getMemOperand()->getFlags(),
                              N->getAAInfo());
   }
   if (N->getMemoryVT() == MVT::i8)
     return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                              N->getBasePtr(), N->getPointerInfo(), MVT::i8,
-                             N->getAlignment(), N->getMemOperand()->getFlags(),
+                             N->getAlign(), N->getMemOperand()->getFlags(),
                              N->getAAInfo());
   return SDValue();
 }
@@ -1789,17 +1789,17 @@
   unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
   assert((StoreBits % 8) == 0 &&
          "Store size in bits must be a multiple of 8");
-  unsigned Alignment = ST->getAlignment();
+  Align Alignment = ST->getAlign();
 
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
     if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
-        LD->getAlignment() == Alignment &&
+        LD->getAlign() == Alignment &&
         !LD->isVolatile() && !LD->isIndexed() &&
         Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
       bool isTail = isInTailCallPosition(DAG, ST, Chain);
       return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
                             DAG.getConstant(StoreBits / 8, dl, MVT::i32),
-                            Align(Alignment), false, isTail,
+                            Alignment, false, isTail,
                             ST->getPointerInfo(), LD->getPointerInfo());
     }
   }
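Because the transitional shims deleted at the top of the patch (SelectionDAG::getEVTAlignment and the unsigned setDestAlignment overload) are gone, any remaining callers have to be expressed in terms of Align and MaybeAlign directly. The sketch below shows what such a caller-side migration might look like; the wrapper function names are invented for illustration, while getEVTAlign and setDestAlignment are the APIs visible in the hunks above.

    // Sketch only, not part of the patch: caller-side equivalents now that the
    // unsigned-based shims are removed. Wrapper names here are hypothetical.
    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/IR/IntrinsicInst.h"

    using namespace llvm;

    // Before: unsigned A = DAG.getEVTAlignment(MemVT);
    // After:  use the Align-returning query directly.
    static Align queryAlign(SelectionDAG &DAG, EVT MemVT) {
      return DAG.getEVTAlign(MemVT);
    }

    // Before: MSI->setDestAlignment(4);  // removed unsigned overload
    // After:  pass a MaybeAlign, or dereference one that is known to be set,
    //         mirroring the cast<MemSetInst>(CI)->setDestAlignment(*Align)
    //         call in the IRBuilder.cpp hunk.
    static void setDest(MemSetInst *MSI, MaybeAlign A) {
      if (A)
        MSI->setDestAlignment(*A);
    }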