diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1576,6 +1576,8 @@
   uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
     return Value->getLimitedValue(Limit);
   }
+  MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
+  Align getAlignValue() const { return Value->getAlignValue(); }
 
   bool isOne() const { return Value->isOne(); }
   bool isNullValue() const { return Value->isZero(); }
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -151,9 +151,19 @@
     return Val.getSExtValue();
   }
 
-  /// Return the constant as an llvm::Align. Note that this method can assert if
-  /// the value does not fit in 64 bits or is not a power of two.
-  inline Align getAlignValue() const { return Align(getZExtValue()); }
+  /// Return the constant as an llvm::MaybeAlign.
+  /// Note that this method can assert if the value does not fit in 64 bits or
+  /// is not a power of two.
+  inline MaybeAlign getMaybeAlignValue() const {
+    return MaybeAlign(getZExtValue());
+  }
+
+  /// Return the constant as an llvm::Align, interpreting `0` as `Align(1)`.
+  /// Note that this method can assert if the value does not fit in 64 bits or
+  /// is not a power of two.
+  inline Align getAlignValue() const {
+    return getMaybeAlignValue().valueOrOne();
+  }
 
   /// A helper method that can be used to determine if the constant contained
   /// within is equal to a constant. This only works for very small values,
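A note on the two new `ConstantInt` accessors before the mechanical call-site updates below: they encode the LLVM convention that an alignment operand of `0` means "unspecified". `getMaybeAlignValue()` preserves that distinction, while `getAlignValue()` collapses it to the minimal alignment via `valueOrOne()`. The following sketch is illustrative only, not part of the patch; it exercises the real `llvm/Support/Alignment.h` API both accessors are built on.

```cpp
// Illustrative sketch, not part of the patch; requires LLVM headers.
#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  using llvm::Align;
  using llvm::MaybeAlign;

  // MaybeAlign(0) is the empty ("unspecified") state; any non-zero value
  // must be a power of two, otherwise the constructor asserts.
  MaybeAlign Unset(0);
  MaybeAlign Eight(8);
  assert(!Unset && Eight && Eight->value() == 8);

  // valueOrOne() is exactly what the new ConstantInt::getAlignValue() does:
  // an unspecified alignment conservatively becomes Align(1).
  assert(Unset.valueOrOne() == Align(1));
  assert(Eight.valueOrOne() == Align(8));
  return 0;
}
```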
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -25,12 +25,12 @@
   bool IsWrite;
   Type *OpType;
   uint64_t TypeSize;
-  unsigned Alignment;
+  MaybeAlign Alignment;
   // The mask Value, if we're looking at a masked load/store.
   Value *MaybeMask;
 
   InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
-                           class Type *OpType, unsigned Alignment,
+                           class Type *OpType, MaybeAlign Alignment,
                            Value *MaybeMask = nullptr)
       : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
         MaybeMask(MaybeMask) {
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -403,7 +403,7 @@
   Instruction *InsertPt = CI;
   BasicBlock *IfBlock = CI->getParent();
 
   Builder.SetInsertPoint(InsertPt);
-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+  MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
 
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
@@ -417,8 +417,8 @@
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-      LoadInst *Load = Builder.CreateAlignedLoad(
-          EltTy, Ptr, MaybeAlign(AlignVal), "Load" + Twine(Idx));
+      LoadInst *Load =
+          Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
       VResult =
           Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
     }
@@ -462,8 +462,8 @@
     Builder.SetInsertPoint(InsertPt);
 
     Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, MaybeAlign(AlignVal),
-                                               "Load" + Twine(Idx));
+    LoadInst *Load =
+        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
     Value *NewVResult =
         Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
@@ -533,7 +533,7 @@
   Builder.SetInsertPoint(InsertPt);
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
-  MaybeAlign AlignVal(cast<ConstantInt>(Alignment)->getZExtValue());
+  MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
   unsigned VectorWidth = cast<FixedVectorType>(Src->getType())->getNumElements();
 
   // Shorten the way if the mask is a vector of constants.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1599,17 +1599,17 @@
   SDValue Size = Tmp2.getOperand(1);
   SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
   Chain = SP.getValue(1);
-  unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
+  Align Alignment = cast<ConstantSDNode>(Tmp3)->getAlignValue();
   const TargetFrameLowering *TFL = DAG.getSubtarget().getFrameLowering();
   unsigned Opc =
     TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ?
       ISD::ADD : ISD::SUB;
 
-  unsigned StackAlign = TFL->getStackAlignment();
+  Align StackAlign = TFL->getStackAlign();
   Tmp1 = DAG.getNode(Opc, dl, VT, SP, Size);       // Value
-  if (Align > StackAlign)
+  if (Alignment > StackAlign)
     Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
-                       DAG.getConstant(-(uint64_t)Align, dl, VT));
+                       DAG.getConstant(-Alignment.value(), dl, VT));
   Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
 
   Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4209,8 +4209,7 @@
     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
     Src0 = I.getArgOperand(0);
     Ptr = I.getArgOperand(1);
-    Alignment =
-        MaybeAlign(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
+    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
     Mask = I.getArgOperand(3);
   };
   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
@@ -4324,9 +4323,9 @@
   SDValue Src0 = getValue(I.getArgOperand(0));
   SDValue Mask = getValue(I.getArgOperand(3));
   EVT VT = Src0.getValueType();
-  MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
-  if (!Alignment)
-    Alignment = DAG.getEVTAlign(VT);
+  Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
+                        ->getMaybeAlignValue()
+                        .getValueOr(DAG.getEVTAlign(VT));
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   AAMDNodes AAInfo;
@@ -4344,7 +4343,7 @@
       MachinePointerInfo(AS), MachineMemOperand::MOStore,
       // TODO: Make MachineMemOperands aware of scalable
       // vectors.
-      MemoryLocation::UnknownSize, *Alignment, AAInfo);
+      MemoryLocation::UnknownSize, Alignment, AAInfo);
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);
@@ -4365,8 +4364,7 @@
                            MaybeAlign &Alignment) {
     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
     Ptr = I.getArgOperand(0);
-    Alignment =
-        MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
+    Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
     Mask = I.getArgOperand(2);
     Src0 = I.getArgOperand(3);
   };
@@ -4435,9 +4433,9 @@
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
 
-  MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
-  if (!Alignment)
-    Alignment = DAG.getEVTAlign(VT);
+  Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
+                        ->getMaybeAlignValue()
+                        .getValueOr(DAG.getEVTAlign(VT));
 
   AAMDNodes AAInfo;
   I.getAAMetadata(AAInfo);
@@ -4455,7 +4453,7 @@
       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
       // TODO: Make MachineMemOperands aware of scalable
       // vectors.
-      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+      MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
 
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
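The two `SelectionDAGBuilder` hunks above also change shape: instead of building a `MaybeAlign` and patching in the type-derived default afterwards, the alignment is resolved in a single expression. In the LLVM vintage this patch targets, `MaybeAlign` derives from `Optional<Align>`, so `getValueOr` supplies the fallback. A minimal sketch of the idiom (illustrative only; `DefaultAlign` stands in for `DAG.getEVTAlign(VT)`, which needs a live `SelectionDAG` to call for real):

```cpp
// Illustrative sketch, not part of the patch; requires LLVM headers.
#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  using llvm::Align;
  using llvm::MaybeAlign;

  const Align DefaultAlign(16); // stand-in for DAG.getEVTAlign(VT)

  // Masked intrinsics encode "no alignment specified" as a 0 operand.
  Align A1 = MaybeAlign(0).getValueOr(DefaultAlign);
  assert(A1 == Align(16)); // falls back to the EVT/ABI alignment

  Align A2 = MaybeAlign(4).getValueOr(DefaultAlign);
  assert(A2 == Align(4)); // an explicit alignment wins
  return 0;
}
```

Note that the local also changes type from `MaybeAlign` to `Align`, which is what lets the later `MachineMemOperand` hunks drop the `*Alignment` dereference.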
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -3757,10 +3757,10 @@
     auto *MemCI = cast<MemIntrinsic>(NewCall);
     // All mem intrinsics support dest alignment.
     const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
-    MemCI->setDestAlignment(Align->getZExtValue());
+    MemCI->setDestAlignment(Align->getMaybeAlignValue());
 
     // Memcpy/Memmove also support source alignment.
     if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
-      MTI->setSourceAlignment(Align->getZExtValue());
+      MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
   }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9109,7 +9109,8 @@
   SDNode *Node = Op.getNode();
   SDValue Chain = Op.getOperand(0);
   SDValue Size = Op.getOperand(1);
-  MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+  MaybeAlign Align =
+      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
   EVT VT = Node->getValueType(0);
 
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -590,8 +590,8 @@
   if (!isa<ConstantInt>(PacketSize) || !isa<ConstantInt>(PacketAlign))
     return false;
   unsigned Size = cast<ConstantInt>(PacketSize)->getZExtValue();
-  unsigned Align = cast<ConstantInt>(PacketAlign)->getZExtValue();
-  if (Size != Align || !isPowerOf2_32(Size))
+  Align Alignment = cast<ConstantInt>(PacketAlign)->getAlignValue();
+  if (Alignment != Size)
     return false;
 
   Type *PtrElemTy;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3127,7 +3127,7 @@
   SDValue Size = Tmp2.getOperand(1);
   SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
   Chain = SP.getValue(1);
-  MaybeAlign Alignment(cast<ConstantSDNode>(Tmp3)->getZExtValue());
+  MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue();
   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
   const TargetFrameLowering *TFL = ST.getFrameLowering();
   unsigned Opc =
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -17758,7 +17758,8 @@
 
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
           "no-stack-arg-probe")) {
-    MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+    MaybeAlign Align =
+        cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
     SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
     Chain = SP.getValue(1);
     SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
@@ -17969,7 +17970,7 @@
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
-    Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
+    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
     // volatile loads with NEON intrinsics not supported
     Info.flags = MachineMemOperand::MOLoad;
     return true;
@@ -18010,7 +18011,7 @@
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
-    Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
+    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
     // volatile stores with NEON intrinsics not supported
     Info.flags = MachineMemOperand::MOStore;
     return true;
diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
--- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -78,7 +78,7 @@
   // Check this is a valid gather with correct alignment
   bool isLegalTypeAndAlignment(unsigned NumElements, unsigned ElemSize,
-                               unsigned Alignment);
+                               Align Alignment);
   // Check whether Ptr is hidden behind a bitcast and look through it
   void lookThroughBitcast(Value *&Ptr);
   // Check for a getelementptr and deduce base and offsets from it, on success
@@ -155,12 +155,12 @@
 
 bool MVEGatherScatterLowering::isLegalTypeAndAlignment(unsigned NumElements,
                                                        unsigned ElemSize,
-                                                       unsigned Alignment) {
+                                                       Align Alignment) {
   if (((NumElements == 4 &&
         (ElemSize == 32 || ElemSize == 16 || ElemSize == 8)) ||
        (NumElements == 8 && (ElemSize == 16 || ElemSize == 8)) ||
        (NumElements == 16 && ElemSize == 8)) &&
-      ElemSize / 8 <= Alignment)
+      Alignment >= ElemSize / 8)
     return true;
   LLVM_DEBUG(dbgs() << "masked gathers/scatters: instruction does not have "
                     << "valid alignment or vector type \n");
@@ -306,7 +306,7 @@
   // Potentially optimising the addressing modes as we do so.
   auto *Ty = cast<FixedVectorType>(I->getType());
   Value *Ptr = I->getArgOperand(0);
-  unsigned Alignment = cast<ConstantInt>(I->getArgOperand(1))->getZExtValue();
+  Align Alignment = cast<ConstantInt>(I->getArgOperand(1))->getAlignValue();
   Value *Mask = I->getArgOperand(2);
   Value *PassThru = I->getArgOperand(3);
 
@@ -466,7 +466,7 @@
   // Potentially optimising the addressing modes as we do so.
   Value *Input = I->getArgOperand(0);
   Value *Ptr = I->getArgOperand(1);
-  unsigned Alignment = cast<ConstantInt>(I->getArgOperand(2))->getZExtValue();
+  Align Alignment = cast<ConstantInt>(I->getArgOperand(2))->getAlignValue();
   auto *Ty = cast<FixedVectorType>(Input->getType());
 
   if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(),
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h
@@ -59,9 +59,8 @@
   inline bool SelectAddrGP(SDValue &N, SDValue &R);
   inline bool SelectAnyImm(SDValue &N, SDValue &R);
   inline bool SelectAnyInt(SDValue &N, SDValue &R);
-  bool SelectAnyImmediate(SDValue &N, SDValue &R, uint32_t LogAlign);
-  bool SelectGlobalAddress(SDValue &N, SDValue &R, bool UseGP,
-                           uint32_t LogAlign);
+  bool SelectAnyImmediate(SDValue &N, SDValue &R, Align Alignment);
+  bool SelectGlobalAddress(SDValue &N, SDValue &R, bool UseGP, Align Alignment);
   bool SelectAddrFI(SDValue &N, SDValue &R);
   bool DetectUseSxtw(SDValue &N, SDValue &R);
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -1315,28 +1315,28 @@
 }
 
 inline bool HexagonDAGToDAGISel::SelectAddrGA(SDValue &N, SDValue &R) {
-  return SelectGlobalAddress(N, R, false, 0);
+  return SelectGlobalAddress(N, R, false, Align(1));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAddrGP(SDValue &N, SDValue &R) {
-  return SelectGlobalAddress(N, R, true, 0);
+  return SelectGlobalAddress(N, R, true, Align(1));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAnyImm(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 0);
+  return SelectAnyImmediate(N, R, Align(1));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAnyImm0(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 0);
+  return SelectAnyImmediate(N, R, Align(1));
 }
 inline bool HexagonDAGToDAGISel::SelectAnyImm1(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 1);
+  return SelectAnyImmediate(N, R, Align(2));
 }
 inline bool HexagonDAGToDAGISel::SelectAnyImm2(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 2);
+  return SelectAnyImmediate(N, R, Align(4));
 }
 inline bool HexagonDAGToDAGISel::SelectAnyImm3(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 3);
+  return SelectAnyImmediate(N, R, Align(8));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAnyInt(SDValue &N, SDValue &R) {
@@ -1348,17 +1348,13 @@
 }
 
 bool HexagonDAGToDAGISel::SelectAnyImmediate(SDValue &N, SDValue &R,
-                                             uint32_t LogAlign) {
-  auto IsAligned = [LogAlign] (uint64_t V) -> bool {
-    return alignTo(V, (uint64_t)1 << LogAlign) == V;
-  };
-
+                                             Align Alignment) {
   switch (N.getOpcode()) {
   case ISD::Constant: {
     if (N.getValueType() != MVT::i32)
       return false;
     int32_t V = cast<ConstantSDNode>(N)->getZExtValue();
-    if (!IsAligned(V))
+    if (!isAligned(Alignment, V))
       return false;
     R = CurDAG->getTargetConstant(V, SDLoc(N), N.getValueType());
     return true;
@@ -1366,37 +1362,34 @@
   case HexagonISD::JT:
   case HexagonISD::CP:
     // These are assumed to always be aligned at least 8-byte boundary.
-    if (LogAlign > 3)
+    if (Alignment > Align(8))
       return false;
     R = N.getOperand(0);
     return true;
   case ISD::ExternalSymbol:
     // Symbols may be aligned at any boundary.
-    if (LogAlign > 0)
+    if (Alignment > Align(1))
       return false;
     R = N;
     return true;
   case ISD::BlockAddress:
     // Block address is always aligned at least 4-byte boundary.
-    if (LogAlign > 2 || !IsAligned(cast<BlockAddressSDNode>(N)->getOffset()))
+    if (Alignment > Align(4) ||
+        !isAligned(Alignment, cast<BlockAddressSDNode>(N)->getOffset()))
       return false;
     R = N;
     return true;
   }
 
-  if (SelectGlobalAddress(N, R, false, LogAlign) ||
-      SelectGlobalAddress(N, R, true, LogAlign))
+  if (SelectGlobalAddress(N, R, false, Alignment) ||
+      SelectGlobalAddress(N, R, true, Alignment))
     return true;
   return false;
 }
 
 bool HexagonDAGToDAGISel::SelectGlobalAddress(SDValue &N, SDValue &R,
-                                              bool UseGP, uint32_t LogAlign) {
-  auto IsAligned = [LogAlign] (uint64_t V) -> bool {
-    return alignTo(V, (uint64_t)1 << LogAlign) == V;
-  };
-
+                                              bool UseGP, Align Alignment) {
   switch (N.getOpcode()) {
   case ISD::ADD: {
     SDValue N0 = N.getOperand(0);
@@ -1407,10 +1400,9 @@
     if (!UseGP && GAOpc != HexagonISD::CONST32)
       return false;
     if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N1)) {
-      SDValue Addr = N0.getOperand(0);
-      // For the purpose of alignment, sextvalue and zextvalue are the same.
-      if (!IsAligned(Const->getZExtValue()))
+      if (!isAligned(Alignment, Const->getZExtValue()))
         return false;
+      SDValue Addr = N0.getOperand(0);
       if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Addr)) {
         if (GA->getOpcode() == ISD::TargetGlobalAddress) {
           uint64_t NewOff = GA->getOffset() + (uint64_t)Const->getSExtValue();
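The Hexagon change above swaps a log2 encoding (`LogAlign`) for a byte-valued `llvm::Align`, which is why the callers' literals go from `0/1/2/3` to `Align(1)/Align(2)/Align(4)/Align(8)`, i.e. `Align(1 << LogAlign)`; it also replaces the hand-rolled `alignTo`-based lambda with `llvm::isAligned`. A small equivalence check (illustrative only, not part of the patch):

```cpp
// Illustrative sketch, not part of the patch; requires LLVM headers.
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

int main() {
  using llvm::Align;

  for (uint32_t LogAlign = 0; LogAlign <= 3; ++LogAlign) {
    Align A(uint64_t(1) << LogAlign); // Align(1), Align(2), Align(4), Align(8)
    assert(llvm::Log2(A) == LogAlign); // round-trips with the old encoding

    for (uint64_t V = 0; V != 64; ++V) {
      // Old check: the lambda's alignTo(V, 1 << LogAlign) == V.
      bool Old = llvm::alignTo(V, uint64_t(1) << LogAlign) == V;
      // New check: isAligned(A, V), i.e. V % A.value() == 0.
      assert(Old == llvm::isAligned(A, V));
    }
  }
  return 0;
}
```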
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -240,7 +240,7 @@
   /// bit signed displacement.
   /// Returns false if it can be represented by [r+imm], which are preferred.
   bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
-    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, 0);
+    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, None);
   }
 
   /// SelectAddrIdx4 - Given the specified address, check to see if it can be
@@ -250,7 +250,8 @@
   /// displacement must be a multiple of 4.
   /// Returns false if it can be represented by [r+imm], which are preferred.
   bool SelectAddrIdxX4(SDValue N, SDValue &Base, SDValue &Index) {
-    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, 4);
+    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
+                                            Align(4));
   }
 
   /// SelectAddrIdx16 - Given the specified address, check to see if it can be
@@ -260,7 +261,8 @@
   /// displacement must be a multiple of 16.
   /// Returns false if it can be represented by [r+imm], which are preferred.
   bool SelectAddrIdxX16(SDValue N, SDValue &Base, SDValue &Index) {
-    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, 16);
+    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
+                                            Align(16));
   }
 
   /// SelectAddrIdxOnly - Given the specified address, force it to be
@@ -275,21 +277,22 @@
   /// displacement.
   bool SelectAddrImm(SDValue N, SDValue &Disp, SDValue &Base) {
-    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 0);
+    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, None);
   }
 
   /// SelectAddrImmX4 - Returns true if the address N can be represented by
   /// a base register plus a signed 16-bit displacement that is a multiple of
   /// 4 (last parameter). Suitable for use by STD and friends.
   bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
-    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 4);
+    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, Align(4));
   }
 
   /// SelectAddrImmX16 - Returns true if the address N can be represented by
   /// a base register plus a signed 16-bit displacement that is a multiple of
   /// 16(last parameter). Suitable for use by STXV and friends.
   bool SelectAddrImmX16(SDValue N, SDValue &Disp, SDValue &Base) {
-    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 16);
+    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG,
+                                            Align(16));
   }
 
   // Select an address into a single register.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -722,7 +722,7 @@
     /// Returns false if it can be represented by [r+imm], which are preferred.
     bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                              SelectionDAG &DAG,
-                             unsigned EncodingAlignment = 0) const;
+                             MaybeAlign EncodingAlignment = None) const;
 
     /// SelectAddressRegImm - Returns true if the address N can be represented
     /// by a base register plus a signed 16-bit displacement [r+imm], and if it
@@ -731,7 +731,7 @@
     /// requirement, i.e. multiples of 4 for DS form.
     bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                              SelectionDAG &DAG,
-                             unsigned EncodingAlignment) const;
+                             MaybeAlign EncodingAlignment) const;
 
     /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
     /// represented as an indexed [r+r] operation.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2436,22 +2436,22 @@
 /// non-zero and N can be represented by a base register plus a signed 16-bit
 /// displacement, make a more precise judgement by checking (displacement % \p
 /// EncodingAlignment).
-bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
-                                            SDValue &Index, SelectionDAG &DAG,
-                                            unsigned EncodingAlignment) const {
+bool PPCTargetLowering::SelectAddressRegReg(
+    SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
+    MaybeAlign EncodingAlignment) const {
   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
   // a [pc+imm].
   if (SelectAddressPCRel(N, Base))
     return false;
 
-  int16_t imm = 0;
+  int16_t Imm = 0;
   if (N.getOpcode() == ISD::ADD) {
     // Is there any SPE load/store (f64), which can't handle 16bit offset?
     // SPE load/store can only handle 8-bit offsets.
     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
       return true;
-    if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || !(imm % EncodingAlignment)))
+    if (isIntS16Immediate(N.getOperand(1), Imm) &&
+        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
       return false; // r+i
     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
       return false; // r+i
@@ -2460,8 +2460,8 @@
     Index = N.getOperand(1);
     return true;
   } else if (N.getOpcode() == ISD::OR) {
-    if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || !(imm % EncodingAlignment)))
+    if (isIntS16Immediate(N.getOperand(1), Imm) &&
+        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
       return false; // r+i can fold it if we can.
 
     // If this is an or of disjoint bitfields, we can codegen this as an add
@@ -2527,10 +2527,9 @@
 /// a signed 16-bit displacement [r+imm], and if it is not better
 /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
 /// displacements that are multiples of that value.
-bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
-                                            SDValue &Base,
-                                            SelectionDAG &DAG,
-                                            unsigned EncodingAlignment) const {
+bool PPCTargetLowering::SelectAddressRegImm(
+    SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
+    MaybeAlign EncodingAlignment) const {
   // FIXME dl should come from parent load or store, not from address
   SDLoc dl(N);
 
@@ -2546,7 +2545,7 @@
   if (N.getOpcode() == ISD::ADD) {
     int16_t imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
+        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
@@ -2570,7 +2569,7 @@
   } else if (N.getOpcode() == ISD::OR) {
     int16_t imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
+        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
       // If this is an or of disjoint bitfields, we can codegen this as an add
       // (for better address arithmetic) if the LHS and RHS of the OR are
       // provably disjoint.
@@ -2597,7 +2596,7 @@
     // this as "d, 0"
     int16_t Imm;
     if (isIntS16Immediate(CN, Imm) &&
-        (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
+        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                              CN->getValueType(0));
@@ -2607,7 +2606,8 @@
     // Handle 32-bit sext immediates with LIS + addr mode.
     if ((CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
-        (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
+        (!EncodingAlignment ||
+         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
       int Addr = (int)CN->getZExtValue();
 
       // Otherwise, break this down into an LIS + disp.
@@ -2794,14 +2794,14 @@
 
   // LDU/STU can only handle immediates that are a multiple of 4.
   if (VT != MVT::i64) {
-    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
+    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
       return false;
   } else {
     // LDU/STU need an address with at least 4-byte alignment.
     if (Alignment < 4)
       return false;
-    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
+    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
       return false;
   }
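Across the PowerPC hunks above, the encoding restriction becomes optional in the type system: `None` now says "any displacement is fine" where `0` used to be a sentinel, and the `%` tests become `isAligned`. A sketch of the resulting guard, assuming the LLVM vintage of this patch (where `MaybeAlign` derives from `llvm::Optional` and `llvm::None` exists); `acceptsDisplacement` is a hypothetical condensation of the checks inside `SelectAddressRegReg`/`SelectAddressRegImm`, not a real function:

```cpp
// Illustrative sketch, not part of the patch; acceptsDisplacement is a
// hypothetical stand-in for the guards in SelectAddressRegReg/RegImm.
#include "llvm/ADT/Optional.h"
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

using llvm::Align;
using llvm::MaybeAlign;
using llvm::None;

// None: no encoding restriction. Align(N): displacement must be N-aligned.
static bool acceptsDisplacement(int16_t Imm, MaybeAlign EncodingAlignment) {
  return !EncodingAlignment || llvm::isAligned(*EncodingAlignment, Imm);
}

int main() {
  assert(acceptsDisplacement(6, None));       // anything goes
  assert(acceptsDisplacement(8, Align(4)));   // DS-form: multiple of 4
  assert(!acceptsDisplacement(6, Align(4)));  // 6 % 4 != 0, rejected
  assert(acceptsDisplacement(32, Align(16))); // DQ-form: multiple of 16
  return 0;
}
```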
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2549,7 +2549,8 @@
                          const SparcSubtarget *Subtarget) {
   SDValue Chain = Op.getOperand(0);  // Legalize the chain.
   SDValue Size  = Op.getOperand(1);  // Legalize the size.
-  MaybeAlign Alignment(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+  MaybeAlign Alignment =
+      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
   Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
   EVT VT = Size->getValueType(0);
   SDLoc dl(Op);
diff --git a/llvm/lib/Transforms/Coroutines/CoroInstr.h b/llvm/lib/Transforms/Coroutines/CoroInstr.h
--- a/llvm/lib/Transforms/Coroutines/CoroInstr.h
+++ b/llvm/lib/Transforms/Coroutines/CoroInstr.h
@@ -212,7 +212,7 @@
   }
 
   Align getStorageAlignment() const {
-    return Align(cast<ConstantInt>(getArgOperand(AlignArg))->getZExtValue());
+    return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
   }
 
   Value *getStorage() const {
@@ -347,7 +347,7 @@
   /// The required alignment of the promise. This must match the
   /// alignment of the promise alloca in the coroutine.
   Align getAlignment() const {
-    return Align(cast<ConstantInt>(getArgOperand(AlignArg))->getZExtValue());
+    return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
   }
 
   // Methods to support type inquiry through isa, cast, and dyn_cast:
@@ -468,8 +468,8 @@
   Value *getSize() const {
     return getArgOperand(SizeArg);
   }
-  unsigned getAlignment() const {
-    return cast<ConstantInt>(getArgOperand(AlignArg))->getZExtValue();
+  Align getAlignment() const {
+    return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
   }
 
   // Methods to support type inquiry through isa, cast, and dyn_cast:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1144,7 +1144,7 @@
   // If the mask is all ones, this is a plain vector store of the 1st argument.
   if (ConstMask->isAllOnesValue()) {
     Value *StorePtr = II.getArgOperand(1);
-    Align Alignment(cast<ConstantInt>(II.getArgOperand(2))->getZExtValue());
+    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
     return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
   }
 
@@ -3383,8 +3383,9 @@
   case Intrinsic::arm_neon_vst4lane: {
     Align MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
     unsigned AlignArg = II->getNumArgOperands() - 1;
-    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
-    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign.value())
+    Value *AlignArgOp = II->getArgOperand(AlignArg);
+    MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
+    if (Align && *Align < MemAlign)
       return replaceOperand(*II, AlignArg,
                             ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                              MemAlign.value(), false));
@@ -4546,11 +4547,10 @@
                                 Call.getContext(), Op1C->getZExtValue()));
     // Add alignment attribute if alignment is a power of two constant.
     if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment)) {
-      uint64_t AlignmentVal = Op0C->getZExtValue();
-      if (llvm::isPowerOf2_64(AlignmentVal))
-        Call.addAttribute(AttributeList::ReturnIndex,
-                          Attribute::getWithAlignment(Call.getContext(),
-                                                      Align(AlignmentVal)));
+      if (MaybeAlign AlignmentVal = Op0C->getMaybeAlignValue())
+        Call.addAttribute(
+            AttributeList::ReturnIndex,
+            Attribute::getWithAlignment(Call.getContext(), *AlignmentVal));
     }
   } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
     Call.addAttribute(AttributeList::ReturnIndex,
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1378,23 +1378,22 @@
     if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
-                             LI->getType(), LI->getAlignment());
+                             LI->getType(), LI->getAlign());
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
-                             SI->getValueOperand()->getType(),
-                             SI->getAlignment());
+                             SI->getValueOperand()->getType(), SI->getAlign());
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
       return;
     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
-                             RMW->getValOperand()->getType(), 0);
+                             RMW->getValOperand()->getType(), None);
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
       return;
     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
-                             XCHG->getCompareOperand()->getType(), 0);
+                             XCHG->getCompareOperand()->getType(), None);
   } else if (auto CI = dyn_cast<CallInst>(I)) {
     auto *F = CI->getCalledFunction();
     if (F && (F->getName().startswith("llvm.masked.load.") ||
@@ -1409,11 +1408,10 @@
       if (ignoreAccess(BasePtr))
         return;
       auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
-      unsigned Alignment = 1;
+      MaybeAlign Alignment = Align(1);
       // Otherwise no alignment guarantees. We probably got Undef.
-      if (auto AlignmentConstant =
-              dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
-        Alignment = (unsigned)AlignmentConstant->getZExtValue();
+      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
+        Alignment = Op->getMaybeAlignValue();
       Value *Mask = CI->getOperand(2 + OpOffset);
       Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
     } else {
@@ -1422,7 +1420,7 @@
             ignoreAccess(CI->getArgOperand(ArgNo)))
           continue;
         Type *Ty = CI->getParamByValType(ArgNo);
-        Interesting.emplace_back(I, ArgNo, false, Ty, 1);
+        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
       }
     }
   }
@@ -1484,7 +1482,7 @@
 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                 Instruction *InsertBefore, Value *Addr,
-                                unsigned Alignment, unsigned Granularity,
+                                MaybeAlign Alignment, unsigned Granularity,
                                 uint32_t TypeSize, bool IsWrite,
                                 Value *SizeArgument, bool UseCalls,
                                 uint32_t Exp) {
@@ -1492,7 +1490,7 @@
   // if the data is properly aligned.
   if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
        TypeSize == 128) &&
-      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
+      (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
     return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
                                    nullptr, UseCalls, Exp);
   Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
@@ -1502,7 +1500,7 @@
 static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
                                         const DataLayout &DL, Type *IntptrTy,
                                         Value *Mask, Instruction *I,
-                                        Value *Addr, unsigned Alignment,
+                                        Value *Addr, MaybeAlign Alignment,
                                         unsigned Granularity, uint32_t TypeSize,
                                         bool IsWrite, Value *SizeArgument,
                                         bool UseCalls, uint32_t Exp) {
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -539,30 +539,29 @@
     if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
-                             LI->getType(), LI->getAlignment());
+                             LI->getType(), LI->getAlign());
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
-                             SI->getValueOperand()->getType(),
-                             SI->getAlignment());
+                             SI->getValueOperand()->getType(), SI->getAlign());
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
       return;
     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
-                             RMW->getValOperand()->getType(), 0);
+                             RMW->getValOperand()->getType(), None);
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
       return;
     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
-                             XCHG->getCompareOperand()->getType(), 0);
+                             XCHG->getCompareOperand()->getType(), None);
   } else if (auto CI = dyn_cast<CallInst>(I)) {
     for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
       if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
           ignoreAccess(CI->getArgOperand(ArgNo)))
         continue;
       Type *Ty = CI->getParamByValType(ArgNo);
-      Interesting.emplace_back(I, ArgNo, false, Ty, 1);
+      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
     }
   }
 }
@@ -733,8 +732,8 @@
   IRBuilder<> IRB(O.getInsn());
   if (isPowerOf2_64(O.TypeSize) &&
       (O.TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
-      (O.Alignment >= (1UL << Mapping.Scale) || O.Alignment == 0 ||
-       O.Alignment >= O.TypeSize / 8)) {
+      (!O.Alignment || *O.Alignment >= (1UL << Mapping.Scale) ||
+       *O.Alignment >= O.TypeSize / 8)) {
     size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
     if (ClInstrumentWithCalls) {
       IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2956,7 +2956,7 @@
     unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
     APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
     Align OtherAlign =
-        assumeAligned(IsDest ? II.getSourceAlignment() : II.getDestAlignment());
+        (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne();
     OtherAlign =
         commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue());
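Finally, the sanitizer hunks are the one place where the old `0` sentinel must keep its meaning rather than decay to `Align(1)`: "alignment unknown" has to stay distinguishable so the unusual-alignment slow path still triggers. The sketch below (illustrative only, not part of the patch; `Granularity` and `TypeSize` are sample values, and the old unsigned form is inlined for comparison) checks that the rewritten predicate is behavior-preserving:

```cpp
// Illustrative sketch, not part of the patch: the ASan/HWASan fast-path
// predicate, old unsigned-sentinel form vs. new MaybeAlign form.
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

using llvm::Align;
using llvm::MaybeAlign;

static bool fastPathOld(unsigned Alignment, unsigned Granularity,
                        uint32_t TypeSize) {
  return Alignment >= Granularity || Alignment == 0 ||
         Alignment >= TypeSize / 8;
}

static bool fastPathNew(MaybeAlign Alignment, unsigned Granularity,
                        uint32_t TypeSize) {
  return !Alignment || *Alignment >= Granularity ||
         *Alignment >= TypeSize / 8;
}

int main() {
  const unsigned Granularity = 8; // e.g. 1 << Mapping.Scale in HWASan
  for (uint32_t TypeSize : {8u, 16u, 32u, 64u, 128u})
    for (unsigned A : {0u, 1u, 2u, 4u, 8u, 16u})
      assert(fastPathOld(A, Granularity, TypeSize) ==
             fastPathNew(A ? MaybeAlign(A) : MaybeAlign(), Granularity,
                         TypeSize));
  return 0;
}
```

The equivalence holds because no `valueOrOne()`-style defaulting is involved on this path: the sanitizers genuinely need to tell "unknown" apart from `Align(1)`.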