diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -798,7 +798,7 @@
   /// Determine if the target supports unaligned memory accesses.
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                       unsigned AddressSpace = 0,
-                                      unsigned Alignment = 1,
+                                      Align Alignment = Align(1),
                                       bool *Fast = nullptr) const;
 
   /// Return hardware support for population count.
@@ -1499,7 +1499,7 @@
   virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                               unsigned BitWidth,
                                               unsigned AddressSpace,
-                                              unsigned Alignment,
+                                              Align Alignment,
                                               bool *Fast) = 0;
   virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
   virtual bool haveFastSqrt(Type *Ty) = 0;
@@ -1896,7 +1896,7 @@
     return Impl.isFPVectorizationPotentiallyUnsafe();
   }
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
-                                      unsigned AddressSpace, unsigned Alignment,
+                                      unsigned AddressSpace, Align Alignment,
                                       bool *Fast) override {
     return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                                Alignment, Fast);
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -310,7 +310,7 @@
   bool isFPVectorizationPotentiallyUnsafe() const { return false; }
 
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
-                                      unsigned AddressSpace, unsigned Alignment,
+                                      unsigned AddressSpace, Align Alignment,
                                       bool *Fast) const {
     return false;
   }
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -199,7 +199,7 @@
   /// \name Scalar TTI Implementations
   /// @{
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
-                                      unsigned AddressSpace, unsigned Alignment,
+                                      unsigned AddressSpace, Align Alignment,
                                       bool *Fast) const {
     EVT E = EVT::getIntegerVT(Context, BitWidth);
     return getTLI()->allowsMisalignedMemoryAccesses(
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1611,7 +1611,7 @@
   /// helps to ensure that such replacements don't generate code that causes an
   /// alignment error (trap) on the target machine.
   virtual bool allowsMisalignedMemoryAccesses(
-      EVT, unsigned AddrSpace = 0, unsigned Align = 1,
+      EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool * /*Fast*/ = nullptr) const {
     return false;
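Note (not part of the patch): the point of this interface change is captured by the llvm::Align type itself. Unlike a raw unsigned, an Align from llvm/Support/Alignment.h is guaranteed to be a non-zero power of two, so callees never have to handle a 0 sentinel. A minimal sketch of the behavior these hunks rely on, assuming the Alignment.h API of this era (the assert-based illustration is mine, not the patch's):

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    using llvm::Align;

    // Illustrative only: Align stores the log2 of the alignment, so it is
    // always a known non-zero power of two; a raw `unsigned` promises nothing.
    void alignBasics() {
      Align A(16);               // explicit ctor; asserts a power of two
      assert(A.value() == 16);   // back to bytes where an integer is needed
      assert(Log2(A) == 4);      // the log2 form is stored directly
      assert(A <= 32 && A >= 8); // mixed Align/integer comparisons exist
      // Align Zero(0);          // would assert: zero is unrepresentable,
                                 // unlike with `unsigned Alignment = 0`
    }
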
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -553,7 +553,7 @@
 bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                          unsigned BitWidth,
                                                          unsigned AddressSpace,
-                                                         unsigned Alignment,
+                                                         Align Alignment,
                                                          bool *Fast) const {
   return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                  AddressSpace, Alignment, Fast);
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -6931,11 +6931,11 @@
   StoreInst *ST = cast<StoreInst>(CombineInst);
   unsigned AS = ST->getPointerAddressSpace();
-  unsigned Align = ST->getAlignment();
+  Align Alignment = ST->getAlign();
   // Check if this store is supported.
   if (!TLI.allowsMisalignedMemoryAccesses(
           TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
-          Align)) {
+          Alignment)) {
     // If this is not supported, there is no way we can combine
     // the extract with the store.
     return false;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1057,7 +1057,7 @@
     MVT VT = getMVTForLLT(Ty);
     if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
         TLI.allowsMisalignedMemoryAccesses(
-            VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
+            VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
             MachineMemOperand::MONone, &Fast) &&
         Fast)
       TySize = Size;
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -194,9 +194,8 @@
     // equal to DstAlign (or zero).
     VT = MVT::i64;
     if (Op.isFixedDstAlign())
-      while (
-          Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
-          !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign().value()))
+      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
+             !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
         VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
 
     assert(VT.isInteger());
@@ -250,7 +249,7 @@
       bool Fast;
       if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
           allowsMisalignedMemoryAccesses(
-              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 1,
+              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
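Note (not part of the patch): these memcpy/memset lowering call sites are where the representational difference bites. GlobalISel previously passed 0 and SelectionDAG passed 1 as the "no fixed destination alignment" sentinel; Align cannot encode zero, so both now normalize to Align(1). A sketch of the idiom, using a hypothetical MemOpLike stand-in (the real MemOp lives in TargetLowering.h):

    #include "llvm/Support/Alignment.h"

    using llvm::Align;

    // Hypothetical stand-in for MemOp, just to show the call-site idiom.
    struct MemOpLike {
      bool HasFixedDstAlign;
      Align DstAlign;
      bool isFixedDstAlign() const { return HasFixedDstAlign; }
      Align getDstAlign() const { return DstAlign; }
    };

    // Pass the known alignment through, or Align(1) ("no guarantee beyond
    // byte alignment") when none is fixed; there is no Align equal to 0.
    Align dstAlignOrOne(const MemOpLike &Op) {
      return Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1);
    }
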
-  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment.value(), Flags,
-                                        Fast);
+  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
 }
 
 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -488,7 +488,7 @@
   /// Returns true if the target allows unaligned memory accesses of the
   /// specified type.
   bool allowsMisalignedMemoryAccesses(
-      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
+      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *Fast = nullptr) const override;
   /// LLT variant.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1656,7 +1656,7 @@
 }
 
 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     bool *Fast) const {
   if (Subtarget->requiresStrictAlign())
     return false;
@@ -1670,7 +1670,7 @@
             // Code that uses clang vector extensions can mark that it
             // wants unaligned accesses to be treated as fast by
             // underspecifying alignment to be 1 or 2.
-            Align <= 2 ||
+            Alignment <= 2 ||
 
             // Disregard v2i64. Memcpy lowering produces those and splitting
             // them regresses performance on micro-benchmarks and olden/bh.
@@ -4144,7 +4144,7 @@
     unsigned AS = StoreNode->getAddressSpace();
     Align Alignment = StoreNode->getAlign();
     if (Alignment < MemVT.getStoreSize() &&
-        !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
+        !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
                                         StoreNode->getMemOperand()->getFlags(),
                                         nullptr)) {
       return scalarizeVectorStore(StoreNode, DAG);
@@ -11395,8 +11395,8 @@
     if (Op.isAligned(AlignCheck))
       return true;
     bool Fast;
-    return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
-                                          &Fast) &&
+    return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
+                                          MachineMemOperand::MONone, &Fast) &&
            Fast;
   };
@@ -11426,8 +11426,8 @@
     if (Op.isAligned(AlignCheck))
       return true;
     bool Fast;
-    return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
-                                          &Fast) &&
+    return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
+                                          MachineMemOperand::MONone, &Fast) &&
            Fast;
   };
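Note (not part of the patch): the AArch64 hunk keeps `Alignment <= 2` unchanged in spirit because Alignment.h provides comparison operators between an Align and an integer byte count; these are the same operators behind `Alignment < MemVT.getStoreSize()` above and `Alignment < 16` in the X86 hunk further down. A one-line sketch, with a hypothetical helper name:

    #include "llvm/Support/Alignment.h"

    using llvm::Align;

    // Hypothetical helper: "the source only promised 1- or 2-byte alignment",
    // which AArch64 treats as a deliberately underspecified fast access.
    bool isUnderspecifiedAlign(Align Alignment) {
      return Alignment <= 2; // same predicate as Alignment.value() <= 2
    }
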
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2890,9 +2890,8 @@
   // Expand unaligned loads earlier than legalization. Due to visitation order
   // problems during legalization, the emitted instructions to pack and unpack
   // the bytes again are not eliminated in the case of an unaligned copy.
-  if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
-                                      LN->getMemOperand()->getFlags(),
-                                      &IsFast)) {
+  if (!allowsMisalignedMemoryAccesses(
+          VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
     SDValue Ops[2];
 
     if (VT.isVector())
@@ -2946,9 +2945,8 @@
   // order problems during legalization, the emitted instructions to pack and
   // unpack the bytes again are not eliminated in the case of an unaligned
   // copy.
-  if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
-                                      SN->getMemOperand()->getFlags(),
-                                      &IsFast)) {
+  if (!allowsMisalignedMemoryAccesses(
+          VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
     if (VT.isVector())
       return scalarizeVectorStore(SN, DAG);
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.h b/llvm/lib/Target/AMDGPU/R600ISelLowering.h
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.h
@@ -50,7 +50,7 @@
                                     const SelectionDAG &DAG) const override;
 
   bool allowsMisalignedMemoryAccesses(
-      EVT VT, unsigned AS, unsigned Align,
+      EVT VT, unsigned AS, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *IsFast = nullptr) const override;
 
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1239,7 +1239,7 @@
   Align Alignment = StoreNode->getAlign();
   if (Alignment < MemVT.getStoreSize() &&
-      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
+      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
                                       StoreNode->getMemOperand()->getFlags(),
                                       nullptr)) {
     return expandUnalignedStore(StoreNode, DAG);
@@ -1640,7 +1640,7 @@
 }
 
 bool R600TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     bool *IsFast) const {
   if (IsFast)
     *IsFast = false;
@@ -1655,7 +1655,7 @@
   if (IsFast)
     *IsFast = true;
 
-  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
+  return VT.bitsGT(MVT::i32) && Alignment.value() % 4 == 0;
 }
 
 static SDValue CompactSwizzlableVector(
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -283,7 +283,7 @@
   }
 
   bool allowsMisalignedMemoryAccesses(
-      EVT VT, unsigned AS, unsigned Alignment,
+      EVT VT, unsigned AS, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *IsFast = nullptr) const override;
 
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1467,8 +1467,8 @@
 }
 
 bool SITargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Alignment,
-    MachineMemOperand::Flags Flags, bool *IsFast) const {
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+    bool *IsFast) const {
   if (IsFast)
     *IsFast = false;
@@ -1482,7 +1482,7 @@
   }
 
   return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
-                                            Align(Alignment), Flags, IsFast);
+                                            Alignment, Flags, IsFast);
 }
 
 EVT SITargetLowering::getOptimalMemOpType(
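Note (not part of the patch): R600 is one of the few places that still drops to integer arithmetic, via `Alignment.value() % 4 == 0`, while SI can now forward the Align straight to allowsMisalignedMemoryAccessesImpl instead of re-wrapping an unsigned in `Align(Alignment)`. Because an Align value is always a power of two, R600's remainder test is the same predicate as a comparison against Align(4); a sketch of the equivalence (helper name is mine):

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    using llvm::Align;

    // For a power of two, "divisible by 4" and ">= 4" coincide, so both
    // spellings answer "is this at least dword-aligned?".
    bool isDwordAligned(Align Alignment) {
      bool ByRemainder = Alignment.value() % 4 == 0;
      bool ByComparison = Alignment >= Align(4);
      assert(ByRemainder == ByComparison); // holds for every Align
      return ByComparison;
    }
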
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -396,7 +396,7 @@
     /// unaligned memory accesses of the specified type. Returns whether it
     /// is "fast" by reference in the second argument.
     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
-                                        unsigned Align,
+                                        Align Alignment,
                                         MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
 
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16594,7 +16594,7 @@
 }
 
 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
-                                                       unsigned Alignment,
+                                                       Align Alignment,
                                                        MachineMemOperand::Flags,
                                                        bool *Fast) const {
   // Depends what it gets converted into if the type is weird.
@@ -16673,14 +16673,14 @@
     bool Fast;
     if (Op.size() >= 16 &&
         (Op.isAligned(Align(16)) ||
-         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
+         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, Align(1),
                                          MachineMemOperand::MONone, &Fast) &&
           Fast))) {
       return MVT::v2f64;
     } else if (Op.size() >= 8 &&
                (Op.isAligned(Align(8)) ||
                 (allowsMisalignedMemoryAccesses(
-                     MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
+                     MVT::f64, 0, Align(1), MachineMemOperand::MONone, &Fast) &&
                  Fast))) {
       return MVT::f64;
     }
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -318,8 +318,9 @@
           bool *Fast) const override;
 
   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
-      unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast)
-      const override;
+                                      Align Alignment,
+                                      MachineMemOperand::Flags Flags,
+                                      bool *Fast) const override;
 
   /// Returns relocation base for the given PIC jumptable.
   SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3442,8 +3442,8 @@
 }
 
 bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Alignment,
-    MachineMemOperand::Flags Flags, bool *Fast) const {
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+    bool *Fast) const {
   MVT SVT = VT.getSimpleVT();
   if (Subtarget.isHVXVectorType(SVT, true))
     return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
diff --git a/llvm/lib/Target/Mips/Mips16ISelLowering.h b/llvm/lib/Target/Mips/Mips16ISelLowering.h
--- a/llvm/lib/Target/Mips/Mips16ISelLowering.h
+++ b/llvm/lib/Target/Mips/Mips16ISelLowering.h
@@ -22,7 +22,7 @@
                          const MipsSubtarget &STI);
 
     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
-                                        unsigned Align,
+                                        Align Alignment,
                                         MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
 
diff --git a/llvm/lib/Target/Mips/Mips16ISelLowering.cpp b/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
--- a/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -156,7 +156,7 @@
 }
 
 bool Mips16TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
   return false;
 }
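Note (not part of the patch): Align's constructor is explicit, which is why every defaulted parameter in these headers changes from `= 1` to `= Align(1)`; there is no implicit conversion from an integer literal. The `override` keyword does the rest of the migration work: a subclass that still spells the parameter `unsigned` fails to compile instead of silently not overriding. A minimal sketch with hypothetical class and method names:

    #include "llvm/Support/Alignment.h"

    using llvm::Align;

    struct BaseTLI {
      // `= 1` would not compile here; the explicit ctor must be spelled out.
      virtual bool allowsMisaligned(unsigned AS = 0,
                                    Align Alignment = Align(1)) const {
        return false;
      }
      virtual ~BaseTLI() = default;
    };

    struct TargetTLI : BaseTLI {
      // A stale `unsigned Alignment` signature here would be a hard error
      // thanks to `override`, which is how out-of-tree targets notice.
      bool allowsMisaligned(unsigned AS, Align Alignment) const override {
        return Alignment.value() >= 2;
      }
    };
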
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.h b/llvm/lib/Target/Mips/MipsSEISelLowering.h
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.h
@@ -41,7 +41,7 @@
                           const TargetRegisterClass *RC);
 
   bool allowsMisalignedMemoryAccesses(
-      EVT VT, unsigned AS = 0, unsigned Align = 1,
+      EVT VT, unsigned AS = 0, Align Alignment = Align(1),
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *Fast = nullptr) const override;
 
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -422,7 +422,7 @@
 }
 
 bool MipsSETargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
   MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
 
   if (Subtarget.systemSupportsUnalignedAccess()) {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -961,7 +961,7 @@
     /// Is unaligned memory access allowed for the given type, and is it fast
     /// relative to software emulation.
     bool allowsMisalignedMemoryAccesses(
-        EVT VT, unsigned AddrSpace, unsigned Align = 1,
+        EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
        MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
        bool *Fast = nullptr) const override;
 
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15539,9 +15539,7 @@
   return isInt<16>(Imm) || isUInt<16>(Imm);
 }
 
-bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                       unsigned,
-                                                       unsigned,
+bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
                                                        MachineMemOperand::Flags,
                                                        bool *Fast) const {
   if (DisablePPCUnaligned)
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -435,8 +435,7 @@
   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                              unsigned AS,
                              Instruction *I = nullptr) const override;
-  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
-                                      unsigned Align,
+  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                       MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
   bool isTruncateFree(Type *, Type *) const override;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -850,7 +850,7 @@
 }
 
 bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
   // Unaligned accesses should never be slower than the expanded version.
   // We check specifically for aligned accesses in the few cases where
   // they are required.
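Note (not part of the patch): Mips16, MipsSE, PowerPC, and SystemZ never consult the alignment, so the patch leaves the parameter unnamed (just `Align,`). The type still participates in override matching, but there is no name to misuse and no unused-parameter warning. A sketch with hypothetical names:

    #include "llvm/Support/Alignment.h"

    using llvm::Align;

    struct HookBase {
      virtual bool allowsMisaligned(Align) const = 0;
      virtual ~HookBase() = default;
    };

    // SystemZ-style answer: unaligned accesses are never slower than the
    // expanded form, so the alignment argument is deliberately ignored.
    struct AlwaysAllows : HookBase {
      bool allowsMisaligned(Align) const override { return true; }
    };
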
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -66,7 +66,7 @@
   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                              unsigned AS,
                              Instruction *I = nullptr) const override;
-  bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
+  bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, Align Alignment,
                                       MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -623,7 +623,7 @@
 }
 
 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
-    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
+    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
     MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
   // WebAssembly supports unaligned accesses, though it should be declared
   // with the p2align attribute on loads and stores which do so, and there
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -919,7 +919,7 @@
     /// Returns true if the target allows unaligned memory accesses of the
     /// specified type. Returns whether it is "fast" in the last argument.
-    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
+    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                         MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2340,7 +2340,7 @@
 }
 
 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
+    EVT VT, unsigned, Align Alignment, MachineMemOperand::Flags Flags,
     bool *Fast) const {
   if (Fast) {
     switch (VT.getSizeInBits()) {
@@ -2364,7 +2364,7 @@
     // well use a regular unaligned vector load.
     // We don't have any NT loads pre-SSE41.
     if (!!(Flags & MachineMemOperand::MOLoad))
-      return (Align < 16 || !Subtarget.hasSSE41());
+      return (Alignment < 16 || !Subtarget.hasSSE41());
     return false;
   }
   // Misaligned accesses of any size are always allowed.
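Note (not part of the patch): the X86 hunk shows the comparison operators doing real work. For non-temporal loads the logic is unchanged: below 16-byte alignment the load cannot use movntdqa anyway, and without SSE4.1 there are no NT loads at all, so the misaligned access is allowed either way. A simplified restatement (the helper name and bool parameter are mine):

    #include "llvm/Support/Alignment.h"

    using llvm::Align;

    // Mirrors `return (Alignment < 16 || !Subtarget.hasSSE41());` above.
    bool misalignedNTLoadAllowed(Align Alignment, bool HasSSE41) {
      return Alignment < 16 || !HasSSE41;
    }
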
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -186,7 +186,7 @@
   /// Check if this load/store access is misaligned accesses.
   bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
-                          unsigned Alignment);
+                          Align Alignment);
 };
 
 class LoadStoreVectorizerLegacyPass : public FunctionPass {
@@ -1061,7 +1061,7 @@
   InstructionsProcessed->insert(Chain.begin(), Chain.end());
 
   // If the store is going to be misaligned, don't vectorize it.
-  if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
+  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
     if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
       auto Chains = splitOddVectorElts(Chain, Sz);
       return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
@@ -1206,7 +1206,7 @@
   InstructionsProcessed->insert(Chain.begin(), Chain.end());
 
   // If the load is going to be misaligned, don't vectorize it.
-  if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
+  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
     if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
       auto Chains = splitOddVectorElts(Chain, Sz);
       return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
@@ -1301,8 +1301,8 @@
 }
 
 bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
-                                    unsigned Alignment) {
-  if (Alignment % SzInBytes == 0)
+                                    Align Alignment) {
+  if (Alignment.value() % SzInBytes == 0)
     return false;
 
   bool Fast = false;
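Note (not part of the patch): the vectorizer's predicate stays integer arithmetic because it asks the opposite question from the comparison idioms above: whether the promised alignment is a multiple of the whole access size, in which case no target query is needed at all. A sketch of just that fast path (the function name is mine):

    #include "llvm/Support/Alignment.h"

    using llvm::Align;

    // Accesses whose alignment covers the full size are trivially fine; only
    // the remaining cases reach TTI::allowsMisalignedMemoryAccesses.
    bool triviallyAligned(unsigned SzInBytes, Align Alignment) {
      return Alignment.value() % SzInBytes == 0;
    }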