diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -156,7 +156,7 @@
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
 
-  bool isLegalMaskedLoadStore(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
     if (!isa<VectorType>(DataType) || !ST->hasSVE())
       return false;
 
@@ -172,11 +172,11 @@
     return false;
   }
 
-  bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }
 
-  bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedStore(Type *DataType, Align Alignment) {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }
 
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -153,15 +153,15 @@
 
   bool isProfitableLSRChainElement(Instruction *I);
 
-  bool isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment);
+  bool isLegalMaskedLoad(Type *DataTy, Align Alignment);
 
-  bool isLegalMaskedStore(Type *DataTy, MaybeAlign Alignment) {
+  bool isLegalMaskedStore(Type *DataTy, Align Alignment) {
     return isLegalMaskedLoad(DataTy, Alignment);
   }
 
-  bool isLegalMaskedGather(Type *Ty, MaybeAlign Alignment);
+  bool isLegalMaskedGather(Type *Ty, Align Alignment);
 
-  bool isLegalMaskedScatter(Type *Ty, MaybeAlign Alignment) {
+  bool isLegalMaskedScatter(Type *Ty, Align Alignment) {
     return isLegalMaskedGather(Ty, Alignment);
   }
 
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -580,7 +580,7 @@
   return false;
 }
 
-bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
+bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
     return false;
 
@@ -596,12 +596,11 @@
   }
 
   unsigned EltWidth = DataTy->getScalarSizeInBits();
-  return (EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
-         (EltWidth == 16 && (!Alignment || *Alignment >= 2)) ||
-         (EltWidth == 8);
+  return (EltWidth == 32 && Alignment >= 4) ||
+         (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
 }
 
-bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, MaybeAlign Alignment) {
+bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
     return false;
 
@@ -618,8 +617,8 @@
     return false;
 
   unsigned EltWidth = Ty->getScalarSizeInBits();
-  return ((EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
-          (EltWidth == 16 && (!Alignment || *Alignment >= 2)) || EltWidth == 8);
+  return ((EltWidth == 32 && Alignment >= 4) ||
+          (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
 }
 
 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -200,12 +200,12 @@
   bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                      TargetTransformInfo::LSRCost &C2);
   bool canMacroFuseCmp();
-  bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment);
-  bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment);
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
+  bool isLegalMaskedStore(Type *DataType, Align Alignment);
   bool isLegalNTLoad(Type *DataType, Align Alignment);
   bool isLegalNTStore(Type *DataType, Align Alignment);
-  bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment);
-  bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment);
+  bool isLegalMaskedGather(Type *DataType, Align Alignment);
+  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
   bool isLegalMaskedExpandLoad(Type *DataType);
   bool isLegalMaskedCompressStore(Type *DataType);
   bool hasDivRemOp(Type *DataType, bool IsSigned);
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -3046,8 +3046,8 @@
   unsigned NumElem = SrcVTy->getNumElements();
   auto *MaskTy =
       FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
-  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, MaybeAlign(Alignment))) ||
-      (IsStore && !isLegalMaskedStore(SrcVTy, MaybeAlign(Alignment))) ||
+  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Align(Alignment))) ||
+      (IsStore && !isLegalMaskedStore(SrcVTy, Align(Alignment))) ||
       !isPowerOf2_32(NumElem)) {
     // Scalarization
     APInt DemandedElts = APInt::getAllOnesValue(NumElem);
@@ -3982,9 +3982,9 @@
   bool Scalarize = false;
   if ((Opcode == Instruction::Load &&
-       !isLegalMaskedGather(SrcVTy, MaybeAlign(Alignment))) ||
+       !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
       (Opcode == Instruction::Store &&
-       !isLegalMaskedScatter(SrcVTy, MaybeAlign(Alignment))))
+       !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
     Scalarize = true;
   // Gather / Scatter for vector 2 is not profitable on KNL / SKX
   // Vector-4 of gather/scatter instruction does not exist on KNL.
@@ -4017,7 +4017,7 @@
   return ST->hasMacroFusion() || ST->hasBranchFusion();
 }
 
-bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
   if (!ST->hasAVX())
     return false;
 
@@ -4041,7 +4041,7 @@
          ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
 }
 
-bool X86TTIImpl::isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
   return isLegalMaskedLoad(DataType, Alignment);
 }
 
@@ -4108,7 +4108,7 @@
   return isLegalMaskedExpandLoad(DataTy);
 }
 
-bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
   // Some CPUs have better gather performance than others.
   // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
   // enable gather with a -march.
@@ -4146,7 +4146,7 @@
   return IntWidth == 32 || IntWidth == 64;
 }
 
-bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
+bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
   // AVX2 doesn't support scatter
   if (!ST->hasAVX512())
     return false;
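
Note (not part of the diff): the substantive change is that llvm::Align, unlike MaybeAlign, always carries a concrete non-zero power-of-two value, so the old `(!Alignment || *Alignment >= N)` patterns collapse to plain `Alignment >= N` comparisons and the "no alignment given" case disappears from the hooks. Below is a minimal caller-side sketch of the migration, assuming the post-change TTI signatures; the helper name and the ABI-alignment fallback are illustrative, not taken from this patch.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical bridge for code that still holds a MaybeAlign: Align's
// constructor asserts a non-zero power of two, so an unset MaybeAlign must
// be resolved (here to the type's ABI alignment) before calling the hook.
static bool canUseMaskedLoad(const TargetTransformInfo &TTI,
                             const DataLayout &DL, Type *DataTy,
                             MaybeAlign MA) {
  Align Alignment = MA ? *MA : Align(DL.getABITypeAlignment(DataTy));
  return TTI.isLegalMaskedLoad(DataTy, Alignment);
}

Resolving the fallback at the call site keeps the per-target legality hooks total functions over real alignments, instead of every target re-implementing the unset-alignment case as the removed ARM code did.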