diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -746,7 +746,7 @@
   /// Determine if the target supports unaligned memory accesses.
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                       unsigned AddressSpace = 0,
-                                      unsigned Alignment = 1,
+                                      Align Alignment = Align(1),
                                       bool *Fast = nullptr) const;
 
   /// Return hardware support for population count.
@@ -1344,8 +1344,7 @@
   virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                               unsigned BitWidth,
                                               unsigned AddressSpace,
-                                              unsigned Alignment,
-                                              bool *Fast) = 0;
+                                              Align Alignment, bool *Fast) = 0;
   virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
   virtual bool haveFastSqrt(Type *Ty) = 0;
   virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
@@ -1691,7 +1690,7 @@
     return Impl.isFPVectorizationPotentiallyUnsafe();
   }
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
-                                      unsigned AddressSpace, unsigned Alignment,
+                                      unsigned AddressSpace, Align Alignment,
                                       bool *Fast) override {
     return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                                Alignment, Fast);
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -265,7 +265,7 @@
   bool isFPVectorizationPotentiallyUnsafe() { return false; }
 
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
-                                      unsigned AddressSpace, unsigned Alignment,
+                                      unsigned AddressSpace, Align Alignment,
                                       bool *Fast) {
     return false;
   }
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -196,7 +196,7 @@
   /// \name Scalar TTI Implementations
   /// @{
 
   bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
-                                      unsigned AddressSpace, unsigned Alignment,
+                                      unsigned AddressSpace, Align Alignment,
                                       bool *Fast) const {
     EVT E = EVT::getIntegerVT(Context, BitWidth);
     return getTLI()->allowsMisalignedMemoryAccesses(
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1574,7 +1574,7 @@
   /// helps to ensure that such replacements don't generate code that causes an
   /// alignment error (trap) on the target machine.
   virtual bool allowsMisalignedMemoryAccesses(
-      EVT, unsigned AddrSpace = 0, unsigned Align = 1,
+      EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool * /*Fast*/ = nullptr) const {
     return false;
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -502,7 +502,7 @@
 bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                          unsigned BitWidth,
                                                          unsigned AddressSpace,
-                                                         unsigned Alignment,
+                                                         Align Alignment,
                                                          bool *Fast) const {
   return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                  AddressSpace, Alignment, Fast);
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -6844,11 +6844,10 @@
   StoreInst *ST = cast<StoreInst>(CombineInst);
   unsigned AS = ST->getPointerAddressSpace();
-  unsigned Align = ST->getAlignment();
   // Check if this store is supported.
   if (!TLI.allowsMisalignedMemoryAccesses(
           TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
-          Align)) {
+          ST->getAlign())) {
     // If this is not supported, there is no way we can combine
     // the extract with the store.
     return false;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -895,7 +895,7 @@
     MVT VT = getMVTForLLT(Ty);
     if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
         TLI.allowsMisalignedMemoryAccesses(
-            VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
+            VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
            MachineMemOperand::MONone, &Fast) &&
         Fast)
       TySize = Size;
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -194,9 +194,8 @@
     // equal to DstAlign (or zero).
     VT = MVT::i64;
     if (Op.isFixedDstAlign())
-      while (
-          Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
-          !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign().value()))
+      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
+             !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
         VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
     assert(VT.isInteger());
 
@@ -250,7 +249,7 @@
       bool Fast;
       if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
           allowsMisalignedMemoryAccesses(
-              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
+              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
              MachineMemOperand::MONone, &Fast) &&
           Fast)
         VTSize = Size;
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1588,7 +1588,8 @@
   }
 
   // This is a misaligned access.
-  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
+  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Align(Alignment), Flags,
+                                        Fast);
 }
 
 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -434,7 +434,7 @@
   /// Returns true if the target allows unaligned memory accesses of the
   /// specified type.
   bool allowsMisalignedMemoryAccesses(
-      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
+      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *Fast = nullptr) const override;
   /// LLT variant.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1280,7 +1280,7 @@
 }
 
 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     bool *Fast) const {
   if (Subtarget->requiresStrictAlign())
     return false;
@@ -1294,7 +1294,7 @@
             // Code that uses clang vector extensions can mark that it
             // wants unaligned accesses to be treated as fast by
             // underspecifying alignment to be 1 or 2.
-            Align <= 2 ||
+            Alignment <= 2 ||
 
             // Disregard v2i64. Memcpy lowering produces those and splitting
             // them regresses performance on micro-benchmarks and olden/bh.
@@ -3320,7 +3320,7 @@
   unsigned AS = StoreNode->getAddressSpace();
   Align Alignment = StoreNode->getAlign();
   if (Alignment < MemVT.getStoreSize() &&
-      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
+      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
                                       StoreNode->getMemOperand()->getFlags(),
                                       nullptr)) {
     return scalarizeVectorStore(StoreNode, DAG);
@@ -9963,8 +9963,8 @@
     if (Op.isAligned(AlignCheck))
       return true;
     bool Fast;
-    return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
-                                          &Fast) &&
+    return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
+                                          MachineMemOperand::MONone, &Fast) &&
            Fast;
   };
 
@@ -9994,8 +9994,8 @@
     if (Op.isAligned(AlignCheck))
       return true;
     bool Fast;
-    return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
-                                          &Fast) &&
+    return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
+                                          MachineMemOperand::MONone, &Fast) &&
            Fast;
   };
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2939,9 +2939,8 @@
   // Expand unaligned loads earlier than legalization. Due to visitation order
   // problems during legalization, the emitted instructions to pack and unpack
   // the bytes again are not eliminated in the case of an unaligned copy.
-  if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
-                                      LN->getMemOperand()->getFlags(),
-                                      &IsFast)) {
+  if (!allowsMisalignedMemoryAccesses(
+          VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
     SDValue Ops[2];
 
     if (VT.isVector())
@@ -2995,9 +2994,8 @@
   // order problems during legalization, the emitted instructions to pack and
   // unpack the bytes again are not eliminated in the case of an unaligned
   // copy.
-  if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
-                                      SN->getMemOperand()->getFlags(),
-                                      &IsFast)) {
+  if (!allowsMisalignedMemoryAccesses(
+          VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
     if (VT.isVector())
       return scalarizeVectorStore(SN, DAG);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -243,7 +243,7 @@
   unsigned RegSize = Ty.getSizeInBits();
   unsigned MemSize = Query.MMODescrs[0].SizeInBits;
-  unsigned Align = Query.MMODescrs[0].AlignInBits;
+  unsigned AlignInBits = Query.MMODescrs[0].AlignInBits;
   unsigned AS = Query.Types[1].getAddressSpace();
 
   // All of these need to be custom lowered to cast the pointer operand.
@@ -255,7 +255,7 @@
 #if 0
   // Accept widening loads based on alignment.
   if (IsLoad && MemSize < Size)
-    MemSize = std::max(MemSize, Align);
+    MemSize = std::max(MemSize, AlignInBits);
 #endif
 
   // Only 1-byte and 2-byte to 32-bit extloads are valid.
@@ -286,9 +286,10 @@
   assert(RegSize >= MemSize);
 
-  if (Align < MemSize) {
+  if (AlignInBits < MemSize) {
     const SITargetLowering *TLI = ST.getTargetLowering();
-    if (!TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS, Align / 8))
+    if (!TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS,
+                                                 Align(AlignInBits / 8)))
       return false;
   }
 
@@ -870,10 +871,10 @@
     // Split vector extloads.
     unsigned MemSize = Query.MMODescrs[0].SizeInBits;
-    unsigned Align = Query.MMODescrs[0].AlignInBits;
+    unsigned AlignInBits = Query.MMODescrs[0].AlignInBits;
 
     if (MemSize < DstTy.getSizeInBits())
-      MemSize = std::max(MemSize, Align);
+      MemSize = std::max(MemSize, AlignInBits);
 
     if (DstTy.isVector() && DstTy.getSizeInBits() > MemSize)
       return true;
@@ -895,9 +896,10 @@
       return true;
     }
 
-    if (Align < MemSize) {
+    if (AlignInBits < MemSize) {
       const SITargetLowering *TLI = ST.getTargetLowering();
-      return !TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS, Align / 8);
+      return !TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS,
+                                                      Align(AlignInBits / 8));
     }
 
     return false;
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.h b/llvm/lib/Target/AMDGPU/R600ISelLowering.h
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.h
@@ -50,7 +50,7 @@
                                       const SelectionDAG &DAG) const override;
 
   bool allowsMisalignedMemoryAccesses(
-      EVT VT, unsigned AS, unsigned Align,
+      EVT VT, unsigned AS, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *IsFast = nullptr) const override;
 
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1267,7 +1267,7 @@
   Align Alignment = StoreNode->getAlign();
   if (Alignment < MemVT.getStoreSize() &&
-      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
+      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
                                       StoreNode->getMemOperand()->getFlags(),
                                       nullptr)) {
     return expandUnalignedStore(StoreNode, DAG);
@@ -1668,7 +1668,7 @@
 }
 
 bool R600TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     bool *IsFast) const {
   if (IsFast)
     *IsFast = false;
@@ -1683,7 +1683,7 @@
   if (IsFast)
     *IsFast = true;
 
-  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
+  return VT.bitsGT(MVT::i32) && Alignment >= Align(4);
 }
 
 static SDValue CompactSwizzlableVector(
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -255,12 +255,12 @@
                                       const SelectionDAG &DAG) const override;
 
   bool allowsMisalignedMemoryAccessesImpl(
-      unsigned Size, unsigned AS, unsigned Align,
+      unsigned Size, unsigned AS, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *IsFast = nullptr) const;
 
   bool allowsMisalignedMemoryAccesses(
-      EVT VT, unsigned AS, unsigned Align,
+      EVT VT, unsigned AS, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *IsFast = nullptr) const override;
 
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1355,7 +1355,7 @@
 }
 
 bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
-    unsigned Size, unsigned AddrSpace, unsigned Align,
+    unsigned Size, unsigned AddrSpace, Align Alignment,
     MachineMemOperand::Flags Flags, bool *IsFast) const {
   if (IsFast)
     *IsFast = false;
@@ -1365,7 +1365,7 @@
     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
     // with adjacent offsets.
-    bool AlignedBy4 = (Align % 4 == 0);
+    bool AlignedBy4 = Alignment >= 4;
     if (IsFast)
       *IsFast = AlignedBy4;
 
@@ -1378,7 +1378,7 @@
   if (!Subtarget->hasUnalignedScratchAccess() &&
       (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
        AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
-    bool AlignedBy4 = Align >= 4;
+    bool AlignedBy4 = Alignment >= 4;
     if (IsFast)
       *IsFast = AlignedBy4;
 
@@ -1392,8 +1392,9 @@
     // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
     // 2-byte alignment is worse than 1 unless doing a 2-byte accesss.
     *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
-               AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
-      Align >= 4 : Align != 2;
+               AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
+                   ? Alignment >= 4
+                   : Alignment != 2;
   }
 
   return true;
@@ -1409,11 +1410,11 @@
   if (IsFast)
     *IsFast = true;
 
-  return Size >= 32 && Align >= 4;
+  return Size >= 32 && Alignment >= 4;
 }
 
 bool SITargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     bool *IsFast) const {
   if (IsFast)
     *IsFast = false;
@@ -1428,7 +1429,7 @@
   }
 
   return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
-                                            Align, Flags, IsFast);
+                                            Alignment, Flags, IsFast);
 }
 
 EVT SITargetLowering::getOptimalMemOpType(
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -382,7 +382,7 @@
     /// unaligned memory accesses of the specified type. Returns whether it
     /// is "fast" by reference in the second argument.
     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
-                                        unsigned Align,
+                                        Align Alignment,
                                         MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16103,7 +16103,7 @@
 }
 
 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
-                                                       unsigned Alignment,
+                                                       Align Alignment,
                                                        MachineMemOperand::Flags,
                                                        bool *Fast) const {
   // Depends what it gets converted into if the type is weird.
@@ -16173,7 +16173,6 @@
   return false;
 }
 
-
 EVT ARMTargetLowering::getOptimalMemOpType(
     const MemOp &Op, const AttributeList &FuncAttributes) const {
   // See if we can use NEON instructions for this...
@@ -16182,14 +16181,14 @@
     bool Fast;
     if (Op.size() >= 16 &&
         (Op.isAligned(Align(16)) ||
-         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
+         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, Align(1),
                                          MachineMemOperand::MONone, &Fast) &&
           Fast))) {
       return MVT::v2f64;
     } else if (Op.size() >= 8 &&
               (Op.isAligned(Align(8)) ||
                (allowsMisalignedMemoryAccesses(
-                    MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
+                    MVT::f64, 0, Align(1), MachineMemOperand::MONone, &Fast) &&
                 Fast))) {
       return MVT::f64;
     }
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -311,8 +311,9 @@
                                       bool *Fast) const override;
 
   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
-      unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast)
-      const override;
+                                      Align Alignment,
+                                      MachineMemOperand::Flags Flags,
+                                      bool *Fast) const override;
 
   /// Returns relocation base for the given PIC jumptable.
   SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3405,8 +3405,8 @@
 }
 
 bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned AddrSpace, unsigned Alignment,
-    MachineMemOperand::Flags Flags, bool *Fast) const {
+    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+    bool *Fast) const {
   MVT SVT = VT.getSimpleVT();
   if (Subtarget.isHVXVectorType(SVT, true))
     return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
diff --git a/llvm/lib/Target/Mips/Mips16ISelLowering.h b/llvm/lib/Target/Mips/Mips16ISelLowering.h
--- a/llvm/lib/Target/Mips/Mips16ISelLowering.h
+++ b/llvm/lib/Target/Mips/Mips16ISelLowering.h
@@ -22,7 +22,7 @@
                           const MipsSubtarget &STI);
 
     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
-                                        unsigned Align,
+                                        Align Alignment,
                                         MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
 
diff --git a/llvm/lib/Target/Mips/Mips16ISelLowering.cpp b/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
--- a/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/llvm/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -156,7 +156,7 @@
 }
 
 bool Mips16TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
   return false;
 }
 
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.h b/llvm/lib/Target/Mips/MipsSEISelLowering.h
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.h
@@ -41,7 +41,7 @@
                               const TargetRegisterClass *RC);
 
     bool allowsMisalignedMemoryAccesses(
-        EVT VT, unsigned AS = 0, unsigned Align = 1,
+        EVT VT, unsigned AS = 0, Align Alignment = Align(1),
         MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
         bool *Fast = nullptr) const override;
 
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -422,7 +422,7 @@
 }
 
 bool MipsSETargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
   MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
 
   if (Subtarget.systemSupportsUnalignedAccess()) {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -905,7 +905,7 @@
     /// Is unaligned memory access allowed for the given type, and is it fast
     /// relative to software emulation.
     bool allowsMisalignedMemoryAccesses(
-        EVT VT, unsigned AddrSpace, unsigned Align = 1,
+        EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
         MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
         bool *Fast = nullptr) const override;
 
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -15982,9 +15982,7 @@
   return isInt<16>(Imm) || isUInt<16>(Imm);
 }
 
-bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                       unsigned,
-                                                       unsigned,
+bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
                                                        MachineMemOperand::Flags,
                                                        bool *Fast) const {
   if (DisablePPCUnaligned)
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -438,8 +438,7 @@
   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                              Type *Ty, unsigned AS,
                              Instruction *I = nullptr) const override;
-  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
-                                      unsigned Align,
+  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                       MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
   bool isTruncateFree(Type *, Type *) const override;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -846,7 +846,7 @@
 }
 
 bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
   // Unaligned accesses should never be slower than the expanded version.
   // We check specifically for aligned accesses in the few cases where
   // they are required.
diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h
--- a/llvm/lib/Target/VE/VEISelLowering.h
+++ b/llvm/lib/Target/VE/VEISelLowering.h
@@ -95,7 +95,7 @@
                          bool ForCodeSize) const override;
 
   /// Returns true if the target allows unaligned memory accesses of the
   /// specified type.
-  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
+  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                       MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
 
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -521,7 +521,7 @@
 /// alignment error (trap) on the target machine.
 bool VETargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                        unsigned AddrSpace,
-                                                       unsigned Align,
+                                                       Align Alignment,
                                                        MachineMemOperand::Flags,
                                                        bool *Fast) const {
   if (Fast) {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -66,7 +66,7 @@
   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                              Type *Ty, unsigned AS,
                              Instruction *I = nullptr) const override;
-  bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
+  bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, Align Alignment,
                                       MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -580,7 +580,7 @@
 }
 
 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
-    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
+    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Alignment*/,
     MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
   // WebAssembly supports unaligned accesses, though it should be declared
   // with the p2align attribute on loads and stores which do so, and there
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -899,7 +899,7 @@
     /// Returns true if the target allows unaligned memory accesses of the
     /// specified type. Returns whether it is "fast" in the last argument.
-    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
+    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                         MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2289,7 +2289,7 @@
 }
 
 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
-    EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
+    EVT VT, unsigned, Align Alignment, MachineMemOperand::Flags Flags,
     bool *Fast) const {
   if (Fast) {
     switch (VT.getSizeInBits()) {
@@ -2313,7 +2313,7 @@
       // well use a regular unaligned vector load.
       // We don't have any NT loads pre-SSE41.
       if (!!(Flags & MachineMemOperand::MOLoad))
-        return (Align < 16 || !Subtarget.hasSSE41());
+        return (Alignment < 16 || !Subtarget.hasSSE41());
       return false;
     }
     // Misaligned accesses of any size are always allowed.
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1304,7 +1304,7 @@
   bool Fast = false;
   bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
                                                    SzInBytes * 8, AddressSpace,
-                                                   Alignment, &Fast);
+                                                   Align(Alignment), &Fast);
   LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
                     << " and fast? " << Fast << "\n";);
   return !Allows || !Fast;
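For out-of-tree targets tracking this change, the override now receives a typed `llvm::Align` (always a power of two, never zero) rather than a raw `unsigned` byte count, so checks written as `Align % 4 == 0` become comparisons such as `Alignment >= 4`, exactly as in the SI and R600 hunks above. The sketch below is illustrative only: `MyTargetLowering` and its "fast when 4-byte aligned" policy are assumptions for the example, not part of this patch.

```cpp
// Illustrative sketch only. MyTargetLowering is a hypothetical target and the
// 4-byte "fast" policy is an assumption; neither appears in this patch.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

class MyTargetLowering : public TargetLowering {
public:
  using TargetLowering::TargetLowering;

  // New-style signature: Alignment is an llvm::Align, not an unsigned.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override {
    // Align guarantees a power of two >= 1, so the old `Align % 4 == 0`
    // check reduces to comparing against the byte count directly.
    bool AlignedBy4 = Alignment >= 4;
    if (Fast)
      *Fast = AlignedBy4;
    // Assume every misaligned access is legal on this target, but only
    // report it as "fast" when it is at least 4-byte aligned.
    return true;
  }
};
```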
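On the caller side, the raw-`unsigned` plumbing disappears as well: `getAlignment()` and `.value()` conversions go away, and the old `0`/`1` "minimal alignment" sentinels are replaced by `Align(1)`, with typed `Align` values passed straight through. A minimal sketch of that pattern, assuming a helper named `canCombineWithStore` that does not exist in the patch (it mirrors the CodeGenPrepare call site above):

```cpp
// Illustrative sketch, not part of the patch: caller-side pattern after the
// change. canCombineWithStore is a hypothetical helper for demonstration.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

static bool canCombineWithStore(const TargetLowering &TLI,
                                const DataLayout &DL, StoreInst &ST) {
  unsigned AS = ST.getPointerAddressSpace();
  // Before: unsigned Align = ST->getAlignment(); ... pass the raw number.
  // After: hand the typed llvm::Align through directly; Flags and Fast use
  // their defaults (MONone, nullptr).
  return TLI.allowsMisalignedMemoryAccesses(
      TLI.getValueType(DL, ST.getValueOperand()->getType()), AS,
      ST.getAlign());
}
```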